1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h"
11 #include "src/deoptimizer.h"
12 #include "src/execution.h"
13 #include "src/gdb-jit.h"
14 #include "src/global-handles.h"
15 #include "src/heap/incremental-marking.h"
16 #include "src/heap/mark-compact.h"
17 #include "src/heap/objects-visiting.h"
18 #include "src/heap/objects-visiting-inl.h"
19 #include "src/heap/spaces-inl.h"
20 #include "src/heap/sweeper-thread.h"
21 #include "src/heap-profiler.h"
22 #include "src/ic-inl.h"
23 #include "src/stub-cache.h"
29 const char* Marking::kWhiteBitPattern = "00";
30 const char* Marking::kBlackBitPattern = "10";
31 const char* Marking::kGreyBitPattern = "11";
32 const char* Marking::kImpossibleBitPattern = "01";
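
// A minimal sketch of how the two-bit patterns above decode to an object
// color (hypothetical standalone helper, not the actual MarkBit/Marking API):
//
//   enum class SketchColor { kWhite, kGrey, kBlack, kImpossible };
//
//   SketchColor Decode(bool mark_bit, bool second_bit) {
//     if (mark_bit) return second_bit ? SketchColor::kGrey : SketchColor::kBlack;
//     return second_bit ? SketchColor::kImpossible : SketchColor::kWhite;
//   }
//
// White objects have not been reached yet, grey objects have been reached but
// their children have not all been visited, and black objects are fully scanned.
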
35 // -------------------------------------------------------------------------
36 // MarkCompactCollector
38 MarkCompactCollector::MarkCompactCollector(Heap* heap)
43 sweep_precisely_(false),
44 reduce_memory_footprint_(false),
45 abort_incremental_marking_(false),
46 marking_parity_(ODD_MARKING_PARITY),
48 was_marked_incrementally_(false),
49 sweeping_in_progress_(false),
50 pending_sweeper_jobs_semaphore_(0),
51 sequential_sweeping_(false),
52 migration_slots_buffer_(NULL),
55 have_code_to_deoptimize_(false) {
59 class VerifyMarkingVisitor : public ObjectVisitor {
61 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
63 void VisitPointers(Object** start, Object** end) {
64 for (Object** current = start; current < end; current++) {
65 if ((*current)->IsHeapObject()) {
66 HeapObject* object = HeapObject::cast(*current);
67 CHECK(heap_->mark_compact_collector()->IsMarked(object));
72 void VisitEmbeddedPointer(RelocInfo* rinfo) {
73 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
74 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
75 Object* p = rinfo->target_object();
80 void VisitCell(RelocInfo* rinfo) {
81 Code* code = rinfo->host();
82 DCHECK(rinfo->rmode() == RelocInfo::CELL);
83 if (!code->IsWeakObject(rinfo->target_cell())) {
84 ObjectVisitor::VisitCell(rinfo);
93 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
94 VerifyMarkingVisitor visitor(heap);
96 Address next_object_must_be_here_or_later = bottom;
98 for (Address current = bottom; current < top; current += kPointerSize) {
99 object = HeapObject::FromAddress(current);
100 if (MarkCompactCollector::IsMarked(object)) {
101 CHECK(current >= next_object_must_be_here_or_later);
102 object->Iterate(&visitor);
103 next_object_must_be_here_or_later = current + object->Size();
109 static void VerifyMarking(NewSpace* space) {
110 Address end = space->top();
111 NewSpacePageIterator it(space->bottom(), end);
112 // The bottom position is at the start of its page. This allows us to use
113 // page->area_start() as the start of the range on all pages.
114 CHECK_EQ(space->bottom(),
115 NewSpacePage::FromAddress(space->bottom())->area_start());
116 while (it.has_next()) {
117 NewSpacePage* page = it.next();
118 Address limit = it.has_next() ? page->area_end() : end;
119 CHECK(limit == end || !page->Contains(end));
120 VerifyMarking(space->heap(), page->area_start(), limit);
125 static void VerifyMarking(PagedSpace* space) {
126 PageIterator it(space);
128 while (it.has_next()) {
130 VerifyMarking(space->heap(), p->area_start(), p->area_end());
135 static void VerifyMarking(Heap* heap) {
136 VerifyMarking(heap->old_pointer_space());
137 VerifyMarking(heap->old_data_space());
138 VerifyMarking(heap->code_space());
139 VerifyMarking(heap->cell_space());
140 VerifyMarking(heap->property_cell_space());
141 VerifyMarking(heap->map_space());
142 VerifyMarking(heap->new_space());
144 VerifyMarkingVisitor visitor(heap);
146 LargeObjectIterator it(heap->lo_space());
147 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
148 if (MarkCompactCollector::IsMarked(obj)) {
149 obj->Iterate(&visitor);
153 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
157 class VerifyEvacuationVisitor : public ObjectVisitor {
159 void VisitPointers(Object** start, Object** end) {
160 for (Object** current = start; current < end; current++) {
161 if ((*current)->IsHeapObject()) {
162 HeapObject* object = HeapObject::cast(*current);
163 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
170 static void VerifyEvacuation(Page* page) {
171 VerifyEvacuationVisitor visitor;
172 HeapObjectIterator iterator(page, NULL);
173 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
174 heap_object = iterator.Next()) {
175 // We skip free space objects.
176 if (!heap_object->IsFiller()) {
177 heap_object->Iterate(&visitor);
183 static void VerifyEvacuation(NewSpace* space) {
184 NewSpacePageIterator it(space->bottom(), space->top());
185 VerifyEvacuationVisitor visitor;
187 while (it.has_next()) {
188 NewSpacePage* page = it.next();
189 Address current = page->area_start();
190 Address limit = it.has_next() ? page->area_end() : space->top();
191 CHECK(limit == space->top() || !page->Contains(space->top()));
192 while (current < limit) {
193 HeapObject* object = HeapObject::FromAddress(current);
194 object->Iterate(&visitor);
195 current += object->Size();
201 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
202 if (!space->swept_precisely()) return;
203 if (FLAG_use_allocation_folding &&
204 (space == heap->old_pointer_space() || space == heap->old_data_space())) {
207 PageIterator it(space);
209 while (it.has_next()) {
211 if (p->IsEvacuationCandidate()) continue;
217 static void VerifyEvacuation(Heap* heap) {
218 VerifyEvacuation(heap, heap->old_pointer_space());
219 VerifyEvacuation(heap, heap->old_data_space());
220 VerifyEvacuation(heap, heap->code_space());
221 VerifyEvacuation(heap, heap->cell_space());
222 VerifyEvacuation(heap, heap->property_cell_space());
223 VerifyEvacuation(heap, heap->map_space());
224 VerifyEvacuation(heap->new_space());
226 VerifyEvacuationVisitor visitor;
227 heap->IterateStrongRoots(&visitor, VISIT_ALL);
229 #endif // VERIFY_HEAP
233 class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
235 VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
237 void VisitPointers(Object** start, Object** end) {
238 for (Object** current = start; current < end; current++) {
239 if ((*current)->IsHeapObject()) {
240 HeapObject* object = HeapObject::cast(*current);
241 if (object->IsString()) continue;
242 switch (object->map()->instance_type()) {
243 case JS_FUNCTION_TYPE:
244 CheckContext(JSFunction::cast(object)->context());
246 case JS_GLOBAL_PROXY_TYPE:
247 CheckContext(JSGlobalProxy::cast(object)->native_context());
249 case JS_GLOBAL_OBJECT_TYPE:
250 case JS_BUILTINS_OBJECT_TYPE:
251 CheckContext(GlobalObject::cast(object)->native_context());
257 VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
260 VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
261 VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
263 case FIXED_ARRAY_TYPE:
264 if (object->IsContext()) {
265 CheckContext(object);
267 FixedArray* array = FixedArray::cast(object);
268 int length = array->length();
269 // Set array length to zero to prevent cycles while iterating
270 // over array bodies; this is easier than intrusive marking.
271 array->set_length(0);
272 array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
274 array->set_length(length);
280 case TYPE_FEEDBACK_INFO_TYPE:
281 object->Iterate(this);
283 case DECLARED_ACCESSOR_INFO_TYPE:
284 case EXECUTABLE_ACCESSOR_INFO_TYPE:
285 case BYTE_ARRAY_TYPE:
286 case CALL_HANDLER_INFO_TYPE:
288 case FIXED_DOUBLE_ARRAY_TYPE:
289 case HEAP_NUMBER_TYPE:
290 case MUTABLE_HEAP_NUMBER_TYPE:
291 case INTERCEPTOR_INFO_TYPE:
294 case SHARED_FUNCTION_INFO_TYPE:
304 void CheckContext(Object* context) {
305 if (!context->IsContext()) return;
306 Context* native_context = Context::cast(context)->native_context();
307 if (current_native_context_ == NULL) {
308 current_native_context_ = native_context;
310 CHECK_EQ(current_native_context_, native_context);
314 Context* current_native_context_;
318 static void VerifyNativeContextSeparation(Heap* heap) {
319 HeapObjectIterator it(heap->code_space());
321 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
322 VerifyNativeContextSeparationVisitor visitor;
323 Code::cast(object)->CodeIterateBody(&visitor);
329 void MarkCompactCollector::SetUp() {
330 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
331 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
335 void MarkCompactCollector::TearDown() { AbortCompaction(); }
338 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
339 p->MarkEvacuationCandidate();
340 evacuation_candidates_.Add(p);
344 static void TraceFragmentation(PagedSpace* space) {
345 int number_of_pages = space->CountTotalPages();
346 intptr_t reserved = (number_of_pages * space->AreaSize());
347 intptr_t free = reserved - space->SizeOfObjects();
348 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
349 AllocationSpaceName(space->identity()), number_of_pages,
350 static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
354 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
356 DCHECK(evacuation_candidates_.length() == 0);
358 #ifdef ENABLE_GDB_JIT_INTERFACE
359 // If the GDBJIT interface is active, disable compaction.
360 if (FLAG_gdbjit) return false;
363 CollectEvacuationCandidates(heap()->old_pointer_space());
364 CollectEvacuationCandidates(heap()->old_data_space());
366 if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
367 FLAG_incremental_code_compaction)) {
368 CollectEvacuationCandidates(heap()->code_space());
369 } else if (FLAG_trace_fragmentation) {
370 TraceFragmentation(heap()->code_space());
373 if (FLAG_trace_fragmentation) {
374 TraceFragmentation(heap()->map_space());
375 TraceFragmentation(heap()->cell_space());
376 TraceFragmentation(heap()->property_cell_space());
379 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
380 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
381 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
383 compacting_ = evacuation_candidates_.length() > 0;
390 void MarkCompactCollector::CollectGarbage() {
391 // Make sure that Prepare() has been called. The individual steps below will
392 // update the state as they proceed.
393 DCHECK(state_ == PREPARE_GC);
396 DCHECK(heap_->incremental_marking()->IsStopped());
398 if (FLAG_collect_maps) ClearNonLiveReferences();
400 ClearWeakCollections();
403 if (FLAG_verify_heap) {
404 VerifyMarking(heap_);
411 if (FLAG_verify_native_context_separation) {
412 VerifyNativeContextSeparation(heap_);
417 if (heap()->weak_embedded_objects_verification_enabled()) {
418 VerifyWeakEmbeddedObjectsInCode();
420 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
421 VerifyOmittedMapChecks();
427 if (marking_parity_ == EVEN_MARKING_PARITY) {
428 marking_parity_ = ODD_MARKING_PARITY;
430 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
431 marking_parity_ = EVEN_MARKING_PARITY;
437 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
438 PageIterator it(space);
440 while (it.has_next()) {
442 CHECK(p->markbits()->IsClean());
443 CHECK_EQ(0, p->LiveBytes());
448 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
449 NewSpacePageIterator it(space->bottom(), space->top());
451 while (it.has_next()) {
452 NewSpacePage* p = it.next();
453 CHECK(p->markbits()->IsClean());
454 CHECK_EQ(0, p->LiveBytes());
459 void MarkCompactCollector::VerifyMarkbitsAreClean() {
460 VerifyMarkbitsAreClean(heap_->old_pointer_space());
461 VerifyMarkbitsAreClean(heap_->old_data_space());
462 VerifyMarkbitsAreClean(heap_->code_space());
463 VerifyMarkbitsAreClean(heap_->cell_space());
464 VerifyMarkbitsAreClean(heap_->property_cell_space());
465 VerifyMarkbitsAreClean(heap_->map_space());
466 VerifyMarkbitsAreClean(heap_->new_space());
468 LargeObjectIterator it(heap_->lo_space());
469 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
470 MarkBit mark_bit = Marking::MarkBitFrom(obj);
471 CHECK(Marking::IsWhite(mark_bit));
472 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
477 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
478 HeapObjectIterator code_iterator(heap()->code_space());
479 for (HeapObject* obj = code_iterator.Next(); obj != NULL;
480 obj = code_iterator.Next()) {
481 Code* code = Code::cast(obj);
482 if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
483 if (WillBeDeoptimized(code)) continue;
484 code->VerifyEmbeddedObjectsDependency();
489 void MarkCompactCollector::VerifyOmittedMapChecks() {
490 HeapObjectIterator iterator(heap()->map_space());
491 for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
492 Map* map = Map::cast(obj);
493 map->VerifyOmittedMapChecks();
496 #endif // VERIFY_HEAP
499 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
500 PageIterator it(space);
502 while (it.has_next()) {
503 Bitmap::Clear(it.next());
508 static void ClearMarkbitsInNewSpace(NewSpace* space) {
509 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
511 while (it.has_next()) {
512 Bitmap::Clear(it.next());
517 void MarkCompactCollector::ClearMarkbits() {
518 ClearMarkbitsInPagedSpace(heap_->code_space());
519 ClearMarkbitsInPagedSpace(heap_->map_space());
520 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
521 ClearMarkbitsInPagedSpace(heap_->old_data_space());
522 ClearMarkbitsInPagedSpace(heap_->cell_space());
523 ClearMarkbitsInPagedSpace(heap_->property_cell_space());
524 ClearMarkbitsInNewSpace(heap_->new_space());
526 LargeObjectIterator it(heap_->lo_space());
527 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
528 MarkBit mark_bit = Marking::MarkBitFrom(obj);
530 mark_bit.Next().Clear();
531 Page::FromAddress(obj->address())->ResetProgressBar();
532 Page::FromAddress(obj->address())->ResetLiveBytes();
537 class MarkCompactCollector::SweeperTask : public v8::Task {
539 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
541 virtual ~SweeperTask() {}
544 // v8::Task overrides.
545 virtual void Run() V8_OVERRIDE {
546 heap_->mark_compact_collector()->SweepInParallel(space_, 0);
547 heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
553 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
557 void MarkCompactCollector::StartSweeperThreads() {
558 DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
559 DCHECK(free_list_old_data_space_.get()->IsEmpty());
560 sweeping_in_progress_ = true;
561 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
562 isolate()->sweeper_threads()[i]->StartSweeping();
564 if (FLAG_job_based_sweeping) {
565 V8::GetCurrentPlatform()->CallOnBackgroundThread(
566 new SweeperTask(heap(), heap()->old_data_space()),
567 v8::Platform::kShortRunningTask);
568 V8::GetCurrentPlatform()->CallOnBackgroundThread(
569 new SweeperTask(heap(), heap()->old_pointer_space()),
570 v8::Platform::kShortRunningTask);
575 void MarkCompactCollector::EnsureSweepingCompleted() {
576 DCHECK(sweeping_in_progress_ == true);
578 // If sweeping is not completed, we try to complete it here. If we do not
579 // have sweeper threads, we have to complete the sweeping ourselves, since
580 // we do not have a good indicator for a swept space in that case.
581 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
582 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
583 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
586 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
587 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
589 if (FLAG_job_based_sweeping) {
590 // Wait twice, once for each of the two sweeper jobs.
591 pending_sweeper_jobs_semaphore_.Wait();
592 pending_sweeper_jobs_semaphore_.Wait();
594 ParallelSweepSpacesComplete();
595 sweeping_in_progress_ = false;
596 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
597 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
598 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
599 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
602 if (FLAG_verify_heap) {
603 VerifyEvacuation(heap_);
609 bool MarkCompactCollector::IsSweepingCompleted() {
610 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
611 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
616 if (FLAG_job_based_sweeping) {
617 if (!pending_sweeper_jobs_semaphore_.WaitFor(
618 base::TimeDelta::FromSeconds(0))) {
621 pending_sweeper_jobs_semaphore_.Signal();
628 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
631 if (space == heap()->old_pointer_space()) {
632 free_list = free_list_old_pointer_space_.get();
633 } else if (space == heap()->old_data_space()) {
634 free_list = free_list_old_data_space_.get();
636 // Any PagedSpace might invoke RefillFreeList, so we need to make sure
637 // to only refill free lists for the old data and old pointer spaces.
641 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
642 space->AddToAccountingStats(freed_bytes);
643 space->DecrementUnsweptFreeBytes(freed_bytes);
647 bool MarkCompactCollector::AreSweeperThreadsActivated() {
648 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
652 void Marking::TransferMark(Address old_start, Address new_start) {
653 // This is only used when resizing an object.
654 DCHECK(MemoryChunk::FromAddress(old_start) ==
655 MemoryChunk::FromAddress(new_start));
657 if (!heap_->incremental_marking()->IsMarking()) return;
659 // If the mark doesn't move, we don't check the color of the object.
660 // It doesn't matter whether the object is black, since it hasn't changed
661 // size, so the adjustment to the live data count will be zero anyway.
662 if (old_start == new_start) return;
664 MarkBit new_mark_bit = MarkBitFrom(new_start);
665 MarkBit old_mark_bit = MarkBitFrom(old_start);
668 ObjectColor old_color = Color(old_mark_bit);
671 if (Marking::IsBlack(old_mark_bit)) {
672 old_mark_bit.Clear();
673 DCHECK(IsWhite(old_mark_bit));
674 Marking::MarkBlack(new_mark_bit);
676 } else if (Marking::IsGrey(old_mark_bit)) {
677 old_mark_bit.Clear();
678 old_mark_bit.Next().Clear();
679 DCHECK(IsWhite(old_mark_bit));
680 heap_->incremental_marking()->WhiteToGreyAndPush(
681 HeapObject::FromAddress(new_start), new_mark_bit);
682 heap_->incremental_marking()->RestartIfNotMarking();
686 ObjectColor new_color = Color(new_mark_bit);
687 DCHECK(new_color == old_color);
692 const char* AllocationSpaceName(AllocationSpace space) {
696 case OLD_POINTER_SPACE:
697 return "OLD_POINTER_SPACE";
699 return "OLD_DATA_SPACE";
706 case PROPERTY_CELL_SPACE:
707 return "PROPERTY_CELL_SPACE";
718 // Returns zero for pages that have so little fragmentation that it is not
719 // worth defragmenting them. Otherwise returns a positive integer that gives
720 // an estimate of fragmentation on an arbitrary scale.
721 static int FreeListFragmentation(PagedSpace* space, Page* p) {
722 // If the page was not swept, then there are no free list items on it.
723 if (!p->WasSwept()) {
724 if (FLAG_trace_fragmentation) {
725 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
726 AllocationSpaceName(space->identity()), p->LiveBytes());
731 PagedSpace::SizeStats sizes;
732 space->ObtainFreeListStatistics(p, &sizes);
735 intptr_t ratio_threshold;
736 intptr_t area_size = space->AreaSize();
737 if (space->identity() == CODE_SPACE) {
738 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
739 ratio_threshold = 10;
741 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
742 ratio_threshold = 15;
745 if (FLAG_trace_fragmentation) {
746 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
747 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
748 static_cast<int>(sizes.small_size_),
749 static_cast<double>(sizes.small_size_ * 100) / area_size,
750 static_cast<int>(sizes.medium_size_),
751 static_cast<double>(sizes.medium_size_ * 100) / area_size,
752 static_cast<int>(sizes.large_size_),
753 static_cast<double>(sizes.large_size_ * 100) / area_size,
754 static_cast<int>(sizes.huge_size_),
755 static_cast<double>(sizes.huge_size_ * 100) / area_size,
756 (ratio > ratio_threshold) ? "[fragmented]" : "");
759 if (FLAG_always_compact && sizes.Total() != area_size) {
763 if (ratio <= ratio_threshold) return 0; // Not fragmented.
765 return static_cast<int>(ratio - ratio_threshold);
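
// A worked example of the fragmentation ratio computed above (hypothetical
// numbers): in a non-code space with area_size == 1000000, small_size_ ==
// 20000 and medium_size_ == 100000 give
//   ratio = (20000 * 5 + 100000) * 100 / 1000000 == 20,
// which exceeds the threshold of 15, so the page reports a fragmentation
// estimate of 20 - 15 == 5.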
769 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
770 DCHECK(space->identity() == OLD_POINTER_SPACE ||
771 space->identity() == OLD_DATA_SPACE ||
772 space->identity() == CODE_SPACE);
774 static const int kMaxMaxEvacuationCandidates = 1000;
775 int number_of_pages = space->CountTotalPages();
776 int max_evacuation_candidates =
777 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
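// For example (hypothetical page count): number_of_pages == 200 yields
// static_cast<int>(std::sqrt(100.0) + 1) == 11 candidates, before the
// flag-based overrides below and the later clamp to kMaxMaxEvacuationCandidates.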
779 if (FLAG_stress_compaction || FLAG_always_compact) {
780 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
785 Candidate() : fragmentation_(0), page_(NULL) {}
786 Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
788 int fragmentation() { return fragmentation_; }
789 Page* page() { return page_; }
796 enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
798 CompactionMode mode = COMPACT_FREE_LISTS;
800 intptr_t reserved = number_of_pages * space->AreaSize();
801 intptr_t over_reserved = reserved - space->SizeOfObjects();
802 static const intptr_t kFreenessThreshold = 50;
804 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
805 // If reduction of memory footprint was requested, we are aggressive
806 // about choosing pages to free. We expect that half-empty pages
807 // are easier to compact so slightly bump the limit.
808 mode = REDUCE_MEMORY_FOOTPRINT;
809 max_evacuation_candidates += 2;
813 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
814 // If over-usage is very high (more than a third of the space), we
815 // try to free all mostly empty pages. We expect that almost empty
816 // pages are even easier to compact so bump the limit even more.
817 mode = REDUCE_MEMORY_FOOTPRINT;
818 max_evacuation_candidates *= 2;
821 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
823 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
824 "evacuation candidate limit: %d\n",
825 static_cast<double>(over_reserved) / MB,
826 static_cast<double>(reserved) / MB,
827 static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
830 intptr_t estimated_release = 0;
832 Candidate candidates[kMaxMaxEvacuationCandidates];
834 max_evacuation_candidates =
835 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
838 int fragmentation = 0;
839 Candidate* least = NULL;
841 PageIterator it(space);
842 if (it.has_next()) it.next(); // Never compact the first page.
844 while (it.has_next()) {
846 p->ClearEvacuationCandidate();
848 if (FLAG_stress_compaction) {
849 unsigned int counter = space->heap()->ms_count();
850 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
851 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
852 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
853 // Don't try to release too many pages.
854 if (estimated_release >= over_reserved) {
858 intptr_t free_bytes = 0;
860 if (!p->WasSwept()) {
861 free_bytes = (p->area_size() - p->LiveBytes());
863 PagedSpace::SizeStats sizes;
864 space->ObtainFreeListStatistics(p, &sizes);
865 free_bytes = sizes.Total();
868 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
870 if (free_pct >= kFreenessThreshold) {
871 estimated_release += free_bytes;
872 fragmentation = free_pct;
877 if (FLAG_trace_fragmentation) {
878 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
879 AllocationSpaceName(space->identity()),
880 static_cast<int>(free_bytes),
881 static_cast<double>(free_bytes * 100) / p->area_size(),
882 (fragmentation > 0) ? "[fragmented]" : "");
885 fragmentation = FreeListFragmentation(space, p);
888 if (fragmentation != 0) {
889 if (count < max_evacuation_candidates) {
890 candidates[count++] = Candidate(fragmentation, p);
893 for (int i = 0; i < max_evacuation_candidates; i++) {
895 candidates[i].fragmentation() < least->fragmentation()) {
896 least = candidates + i;
900 if (least->fragmentation() < fragmentation) {
901 *least = Candidate(fragmentation, p);
908 for (int i = 0; i < count; i++) {
909 AddEvacuationCandidate(candidates[i].page());
912 if (count > 0 && FLAG_trace_fragmentation) {
913 PrintF("Collected %d evacuation candidates for space %s\n", count,
914 AllocationSpaceName(space->identity()));
919 void MarkCompactCollector::AbortCompaction() {
921 int npages = evacuation_candidates_.length();
922 for (int i = 0; i < npages; i++) {
923 Page* p = evacuation_candidates_[i];
924 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
925 p->ClearEvacuationCandidate();
926 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
929 evacuation_candidates_.Rewind(0);
930 invalidated_code_.Rewind(0);
932 DCHECK_EQ(0, evacuation_candidates_.length());
936 void MarkCompactCollector::Prepare() {
937 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
940 DCHECK(state_ == IDLE);
944 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
946 if (sweeping_in_progress()) {
947 // Instead of waiting we could also abort the sweeper threads here.
948 EnsureSweepingCompleted();
951 // Clear marking bits if incremental marking is aborted.
952 if (was_marked_incrementally_ && abort_incremental_marking_) {
953 heap()->incremental_marking()->Abort();
955 AbortWeakCollections();
957 was_marked_incrementally_ = false;
961 // Don't start compaction if we are in the middle of an incremental
962 // marking cycle: we did not collect any slots.
962 if (!FLAG_never_compact && !was_marked_incrementally_) {
963 StartCompaction(NON_INCREMENTAL_COMPACTION);
966 PagedSpaces spaces(heap());
967 for (PagedSpace* space = spaces.next(); space != NULL;
968 space = spaces.next()) {
969 space->PrepareForMarkCompact();
973 if (!was_marked_incrementally_ && FLAG_verify_heap) {
974 VerifyMarkbitsAreClean();
980 void MarkCompactCollector::Finish() {
982 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
985 // The stub cache is not traversed during GC; clear the cache to
986 // force lazy re-initialization of it. This must be done after the
987 // GC, because it relies on the new addresses of certain old space
988 // objects (empty string, illegal builtin).
989 isolate()->stub_cache()->Clear();
991 if (have_code_to_deoptimize_) {
992 // Some code objects were marked for deoptimization during the GC.
993 Deoptimizer::DeoptimizeMarkedCode(isolate());
994 have_code_to_deoptimize_ = false;
999 // -------------------------------------------------------------------------
1000 // Phase 1: tracing and marking live objects.
1001 // before: all objects are in normal state.
1002 // after: a live object's mark bits are set to black ('10').
1004 // Marking all live objects in the heap as part of mark-sweep or mark-compact
1005 // collection. Before marking, all objects are in their normal state. After
1006 // marking, live objects' map pointers are marked indicating that the object
1007 // has been found reachable.
1009 // The marking algorithm is a (mostly) depth-first (because of possible stack
1010 // overflow) traversal of the graph of objects reachable from the roots. It
1011 // uses an explicit stack of pointers rather than recursion. The young
1012 // generation's inactive ('from') space is used as a marking stack. The
1013 // objects in the marking stack are the ones that have been reached and marked
1014 // but their children have not yet been visited.
1016 // The marking stack can overflow during traversal. In that case, we set an
1017 // overflow flag. When the overflow flag is set, we continue marking objects
1018 // reachable from the objects on the marking stack, but no longer push them on
1019 // the marking stack. Instead, we mark them as both marked and overflowed.
1020 // When the stack is in the overflowed state, objects marked as overflowed
1021 // have been reached and marked but their children have not been visited yet.
1022 // After emptying the marking stack, we clear the overflow flag and traverse
1023 // the heap looking for objects marked as overflowed, push them on the stack,
1024 // and continue with marking. This process repeats until all reachable
1025 // objects have been marked.
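//
// A minimal sketch of the overflow-tolerant loop described above, written
// against a hypothetical fixed-capacity Stack type rather than the real
// MarkingDeque (all helper names here are illustrative only):
//
//   bool overflowed = false;
//   for (;;) {
//     while (!stack.empty()) {
//       HeapObject* obj = stack.pop();
//       for (HeapObject* child : ChildrenOf(obj)) {
//         if (!IsWhite(child)) continue;
//         MarkGrey(child);
//         if (overflowed || !stack.push(child)) {
//           overflowed = true;
//           MarkOverflowed(child);  // remembered for the later heap re-scan
//         }
//       }
//       MarkBlack(obj);  // all children are now at least grey
//     }
//     if (!overflowed) break;
//     overflowed = false;
//     PushOverflowedObjects(&stack);  // linear heap scan; may overflow again
//   }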
1027 void CodeFlusher::ProcessJSFunctionCandidates() {
1028 Code* lazy_compile =
1029 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1030 Object* undefined = isolate_->heap()->undefined_value();
1032 JSFunction* candidate = jsfunction_candidates_head_;
1033 JSFunction* next_candidate;
1034 while (candidate != NULL) {
1035 next_candidate = GetNextCandidate(candidate);
1036 ClearNextCandidate(candidate, undefined);
1038 SharedFunctionInfo* shared = candidate->shared();
1040 Code* code = shared->code();
1041 MarkBit code_mark = Marking::MarkBitFrom(code);
1042 if (!code_mark.Get()) {
1043 if (FLAG_trace_code_flushing && shared->is_compiled()) {
1044 PrintF("[code-flushing clears: ");
1045 shared->ShortPrint();
1046 PrintF(" - age: %d]\n", code->GetAge());
1048 shared->set_code(lazy_compile);
1049 candidate->set_code(lazy_compile);
1051 candidate->set_code(code);
1054 // We are in the middle of a GC cycle so the write barrier in the code
1055 // setter did not record the slot update and we have to do that manually.
1056 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1057 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1058 isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
1061 Object** shared_code_slot =
1062 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1063 isolate_->heap()->mark_compact_collector()->RecordSlot(
1064 shared_code_slot, shared_code_slot, *shared_code_slot);
1066 candidate = next_candidate;
1069 jsfunction_candidates_head_ = NULL;
1073 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1074 Code* lazy_compile =
1075 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1077 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1078 SharedFunctionInfo* next_candidate;
1079 while (candidate != NULL) {
1080 next_candidate = GetNextCandidate(candidate);
1081 ClearNextCandidate(candidate);
1083 Code* code = candidate->code();
1084 MarkBit code_mark = Marking::MarkBitFrom(code);
1085 if (!code_mark.Get()) {
1086 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1087 PrintF("[code-flushing clears: ");
1088 candidate->ShortPrint();
1089 PrintF(" - age: %d]\n", code->GetAge());
1091 candidate->set_code(lazy_compile);
1094 Object** code_slot =
1095 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1096 isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
1099 candidate = next_candidate;
1102 shared_function_info_candidates_head_ = NULL;
1106 void CodeFlusher::ProcessOptimizedCodeMaps() {
1107 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1109 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1110 SharedFunctionInfo* next_holder;
1112 while (holder != NULL) {
1113 next_holder = GetNextCodeMap(holder);
1114 ClearNextCodeMap(holder);
1116 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1117 int new_length = SharedFunctionInfo::kEntriesStart;
1118 int old_length = code_map->length();
1119 for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1120 i += SharedFunctionInfo::kEntryLength) {
1122 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1123 if (!Marking::MarkBitFrom(code).Get()) continue;
1125 // Move every slot in the entry.
1126 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1127 int dst_index = new_length++;
1128 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1129 Object* object = code_map->get(i + j);
1130 code_map->set(dst_index, object);
1131 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1132 DCHECK(object->IsSmi());
1135 Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
1136 isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
1142 // Trim the optimized code map if entries have been removed.
1143 if (new_length < old_length) {
1144 holder->TrimOptimizedCodeMap(old_length - new_length);
1147 holder = next_holder;
1150 optimized_code_map_holder_head_ = NULL;
1154 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1155 // Make sure previous flushing decisions are revisited.
1156 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1158 if (FLAG_trace_code_flushing) {
1159 PrintF("[code-flushing abandons function-info: ");
1160 shared_info->ShortPrint();
1164 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1165 SharedFunctionInfo* next_candidate;
1166 if (candidate == shared_info) {
1167 next_candidate = GetNextCandidate(shared_info);
1168 shared_function_info_candidates_head_ = next_candidate;
1169 ClearNextCandidate(shared_info);
1171 while (candidate != NULL) {
1172 next_candidate = GetNextCandidate(candidate);
1174 if (next_candidate == shared_info) {
1175 next_candidate = GetNextCandidate(shared_info);
1176 SetNextCandidate(candidate, next_candidate);
1177 ClearNextCandidate(shared_info);
1181 candidate = next_candidate;
1187 void CodeFlusher::EvictCandidate(JSFunction* function) {
1188 DCHECK(!function->next_function_link()->IsUndefined());
1189 Object* undefined = isolate_->heap()->undefined_value();
1191 // Make sure previous flushing decisions are revisited.
1192 isolate_->heap()->incremental_marking()->RecordWrites(function);
1193 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1195 if (FLAG_trace_code_flushing) {
1196 PrintF("[code-flushing abandons closure: ");
1197 function->shared()->ShortPrint();
1201 JSFunction* candidate = jsfunction_candidates_head_;
1202 JSFunction* next_candidate;
1203 if (candidate == function) {
1204 next_candidate = GetNextCandidate(function);
1205 jsfunction_candidates_head_ = next_candidate;
1206 ClearNextCandidate(function, undefined);
1208 while (candidate != NULL) {
1209 next_candidate = GetNextCandidate(candidate);
1211 if (next_candidate == function) {
1212 next_candidate = GetNextCandidate(function);
1213 SetNextCandidate(candidate, next_candidate);
1214 ClearNextCandidate(function, undefined);
1218 candidate = next_candidate;
1224 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1225 DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
1226 ->get(SharedFunctionInfo::kNextMapIndex)
1229 // Make sure previous flushing decisions are revisited.
1230 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1232 if (FLAG_trace_code_flushing) {
1233 PrintF("[code-flushing abandons code-map: ");
1234 code_map_holder->ShortPrint();
1238 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1239 SharedFunctionInfo* next_holder;
1240 if (holder == code_map_holder) {
1241 next_holder = GetNextCodeMap(code_map_holder);
1242 optimized_code_map_holder_head_ = next_holder;
1243 ClearNextCodeMap(code_map_holder);
1245 while (holder != NULL) {
1246 next_holder = GetNextCodeMap(holder);
1248 if (next_holder == code_map_holder) {
1249 next_holder = GetNextCodeMap(code_map_holder);
1250 SetNextCodeMap(holder, next_holder);
1251 ClearNextCodeMap(code_map_holder);
1255 holder = next_holder;
1261 void CodeFlusher::EvictJSFunctionCandidates() {
1262 JSFunction* candidate = jsfunction_candidates_head_;
1263 JSFunction* next_candidate;
1264 while (candidate != NULL) {
1265 next_candidate = GetNextCandidate(candidate);
1266 EvictCandidate(candidate);
1267 candidate = next_candidate;
1269 DCHECK(jsfunction_candidates_head_ == NULL);
1273 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1274 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1275 SharedFunctionInfo* next_candidate;
1276 while (candidate != NULL) {
1277 next_candidate = GetNextCandidate(candidate);
1278 EvictCandidate(candidate);
1279 candidate = next_candidate;
1281 DCHECK(shared_function_info_candidates_head_ == NULL);
1285 void CodeFlusher::EvictOptimizedCodeMaps() {
1286 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1287 SharedFunctionInfo* next_holder;
1288 while (holder != NULL) {
1289 next_holder = GetNextCodeMap(holder);
1290 EvictOptimizedCodeMap(holder);
1291 holder = next_holder;
1293 DCHECK(optimized_code_map_holder_head_ == NULL);
1297 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1298 Heap* heap = isolate_->heap();
1300 JSFunction** slot = &jsfunction_candidates_head_;
1301 JSFunction* candidate = jsfunction_candidates_head_;
1302 while (candidate != NULL) {
1303 if (heap->InFromSpace(candidate)) {
1304 v->VisitPointer(reinterpret_cast<Object**>(slot));
1306 candidate = GetNextCandidate(*slot);
1307 slot = GetNextCandidateSlot(*slot);
1312 MarkCompactCollector::~MarkCompactCollector() {
1313 if (code_flusher_ != NULL) {
1314 delete code_flusher_;
1315 code_flusher_ = NULL;
1320 static inline HeapObject* ShortCircuitConsString(Object** p) {
1321 // Optimization: If the heap object pointed to by p is a non-internalized
1322 // cons string whose right substring is HEAP->empty_string, update
1323 // it in place to its left substring. Return the updated value.
1325 // Here we assume that if we change *p, we replace it with a heap object
1326 // (i.e., the left substring of a cons string is always a heap object).
1328 // The check performed is:
1329 // object->IsConsString() && !object->IsInternalizedString() &&
1330 // (ConsString::cast(object)->second() == HEAP->empty_string())
1331 // except the maps for the object and its possible substrings might be marked.
1333 HeapObject* object = HeapObject::cast(*p);
1334 if (!FLAG_clever_optimizations) return object;
1335 Map* map = object->map();
1336 InstanceType type = map->instance_type();
1337 if (!IsShortcutCandidate(type)) return object;
1339 Object* second = reinterpret_cast<ConsString*>(object)->second();
1340 Heap* heap = map->GetHeap();
1341 if (second != heap->empty_string()) {
1345 // Since we don't have the object's start, it is impossible to update the
1346 // page dirty marks. Therefore, we only replace the string with its left
1347 // substring when page dirty marks do not change.
1348 Object* first = reinterpret_cast<ConsString*>(object)->first();
1349 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1352 return HeapObject::cast(first);
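//
// For example (hypothetical strings): a non-internalized cons string with
// parts ("abc", "") has *p updated to point directly at "abc", while a cons
// string with parts ("abc", "def") is left untouched.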
1356 class MarkCompactMarkingVisitor
1357 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1359 static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
1362 static void ObjectStatsCountFixedArray(
1363 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1364 FixedArraySubInstanceType dictionary_type);
1366 template <MarkCompactMarkingVisitor::VisitorId id>
1367 class ObjectStatsTracker {
1369 static inline void Visit(Map* map, HeapObject* obj);
1372 static void Initialize();
1374 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1375 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1378 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1379 // Mark all objects pointed to in [start, end).
1380 const int kMinRangeForMarkingRecursion = 64;
1381 if (end - start >= kMinRangeForMarkingRecursion) {
1382 if (VisitUnmarkedObjects(heap, start, end)) return;
1383 // We are close to a stack overflow, so just mark the objects.
1385 MarkCompactCollector* collector = heap->mark_compact_collector();
1386 for (Object** p = start; p < end; p++) {
1387 MarkObjectByPointer(collector, start, p);
1391 // Marks the object black and pushes it on the marking stack.
1392 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1393 MarkBit mark = Marking::MarkBitFrom(object);
1394 heap->mark_compact_collector()->MarkObject(object, mark);
1397 // Marks the object black without pushing it on the marking stack.
1398 // Returns true if object needed marking and false otherwise.
1399 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1400 MarkBit mark_bit = Marking::MarkBitFrom(object);
1401 if (!mark_bit.Get()) {
1402 heap->mark_compact_collector()->SetMark(object, mark_bit);
1408 // Mark object pointed to by p.
1409 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1410 Object** anchor_slot, Object** p)) {
1411 if (!(*p)->IsHeapObject()) return;
1412 HeapObject* object = ShortCircuitConsString(p);
1413 collector->RecordSlot(anchor_slot, p, object);
1414 MarkBit mark = Marking::MarkBitFrom(object);
1415 collector->MarkObject(object, mark);
1419 // Visit an unmarked object.
1420 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1423 DCHECK(collector->heap()->Contains(obj));
1424 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1426 Map* map = obj->map();
1427 Heap* heap = obj->GetHeap();
1428 MarkBit mark = Marking::MarkBitFrom(obj);
1429 heap->mark_compact_collector()->SetMark(obj, mark);
1430 // Mark the map pointer and the body.
1431 MarkBit map_mark = Marking::MarkBitFrom(map);
1432 heap->mark_compact_collector()->MarkObject(map, map_mark);
1433 IterateBody(map, obj);
1436 // Visit all unmarked objects pointed to by [start, end).
1437 // Returns false if the operation fails (lack of stack space).
1438 INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
1440 // Return false if we are close to the stack limit.
1441 StackLimitCheck check(heap->isolate());
1442 if (check.HasOverflowed()) return false;
1444 MarkCompactCollector* collector = heap->mark_compact_collector();
1445 // Visit the unmarked objects.
1446 for (Object** p = start; p < end; p++) {
1448 if (!o->IsHeapObject()) continue;
1449 collector->RecordSlot(start, p, o);
1450 HeapObject* obj = HeapObject::cast(o);
1451 MarkBit mark = Marking::MarkBitFrom(obj);
1452 if (mark.Get()) continue;
1453 VisitUnmarkedObject(collector, obj);
1460 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1462 // Code flushing support.
1464 static const int kRegExpCodeThreshold = 5;
1466 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1468 // Make sure that the fixed array is in fact initialized on the RegExp.
1469 // We could potentially trigger a GC when initializing the RegExp.
1470 if (HeapObject::cast(re->data())->map()->instance_type() !=
1474 // Make sure this is a RegExp that actually contains code.
1475 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1477 Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1478 if (!code->IsSmi() &&
1479 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1480 // Save a copy that can be reinstated if we need the code again.
1481 re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1483 // Saving a copy might create a pointer into a compaction candidate that was
1484 // not observed by the marker. This might happen if the JSRegExp data was
1485 // marked through the compilation cache before the marker reached the JSRegExp.
1487 FixedArray* data = FixedArray::cast(re->data());
1488 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1489 heap->mark_compact_collector()->RecordSlot(slot, slot, code);
1491 // Set a number in the 0-255 range to guarantee no smi overflow.
1492 re->SetDataAt(JSRegExp::code_index(is_ascii),
1493 Smi::FromInt(heap->sweep_generation() & 0xff));
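// Worked example of this aging stamp and of the flush check below
// (hypothetical generation numbers): if the slot is stamped while
// sweep_generation() is 200, it holds Smi(200 & 0xff) == Smi(200). With
// kRegExpCodeThreshold == 5 the code is flushed during the GC whose
// sweep_generation() is 205, because (205 - 5) & 0xff == 200 matches the
// stored stamp; using the RegExp in between typically reinstates real code,
// which is then re-stamped with a newer generation, postponing the flush.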
1494 } else if (code->IsSmi()) {
1495 int value = Smi::cast(code)->value();
1496 // The regexp has not been compiled yet or there was a compilation error.
1497 if (value == JSRegExp::kUninitializedValue ||
1498 value == JSRegExp::kCompilationErrorValue) {
1502 // Check if we should flush now.
1503 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1504 re->SetDataAt(JSRegExp::code_index(is_ascii),
1505 Smi::FromInt(JSRegExp::kUninitializedValue));
1506 re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1507 Smi::FromInt(JSRegExp::kUninitializedValue));
1513 // Code flushing for RegExps works by setting the current sweep_generation
1514 // (as a smi) in the code-object slot of the RegExp's data array, keeping a
1515 // copy around that can be reinstated if we reuse the RegExp before flushing.
1516 // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
1517 // we flush the code.
1518 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1519 Heap* heap = map->GetHeap();
1520 MarkCompactCollector* collector = heap->mark_compact_collector();
1521 if (!collector->is_code_flushing_enabled()) {
1522 VisitJSRegExp(map, object);
1525 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1526 // Flush code or set age on both ASCII and two byte code.
1527 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1528 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1529 // Visit the fields of the RegExp, including the updated FixedArray.
1530 VisitJSRegExp(map, object);
1533 static VisitorDispatchTable<Callback> non_count_table_;
1537 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1538 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1539 FixedArraySubInstanceType dictionary_type) {
1540 Heap* heap = fixed_array->map()->GetHeap();
1541 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1542 fixed_array->map() != heap->fixed_double_array_map() &&
1543 fixed_array != heap->empty_fixed_array()) {
1544 if (fixed_array->IsDictionary()) {
1545 heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
1547 heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
1553 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1554 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1555 Heap* heap = map->GetHeap();
1556 int object_size = obj->Size();
1557 heap->RecordObjectStats(map->instance_type(), object_size);
1558 non_count_table_.GetVisitorById(id)(map, obj);
1559 if (obj->IsJSObject()) {
1560 JSObject* object = JSObject::cast(obj);
1561 ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
1562 FAST_ELEMENTS_SUB_TYPE);
1563 ObjectStatsCountFixedArray(object->properties(),
1564 DICTIONARY_PROPERTIES_SUB_TYPE,
1565 FAST_PROPERTIES_SUB_TYPE);
1570 template <MarkCompactMarkingVisitor::VisitorId id>
1571 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
1573 ObjectStatsVisitBase(id, map, obj);
1578 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1579 MarkCompactMarkingVisitor::kVisitMap> {
1581 static inline void Visit(Map* map, HeapObject* obj) {
1582 Heap* heap = map->GetHeap();
1583 Map* map_obj = Map::cast(obj);
1584 DCHECK(map->instance_type() == MAP_TYPE);
1585 DescriptorArray* array = map_obj->instance_descriptors();
1586 if (map_obj->owns_descriptors() &&
1587 array != heap->empty_descriptor_array()) {
1588 int fixed_array_size = array->Size();
1589 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1592 if (map_obj->HasTransitionArray()) {
1593 int fixed_array_size = map_obj->transitions()->Size();
1594 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1597 if (map_obj->has_code_cache()) {
1598 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1599 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1600 cache->default_cache()->Size());
1601 if (!cache->normal_type_cache()->IsUndefined()) {
1602 heap->RecordFixedArraySubTypeStats(
1603 MAP_CODE_CACHE_SUB_TYPE,
1604 FixedArray::cast(cache->normal_type_cache())->Size());
1607 ObjectStatsVisitBase(kVisitMap, map, obj);
1613 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1614 MarkCompactMarkingVisitor::kVisitCode> {
1616 static inline void Visit(Map* map, HeapObject* obj) {
1617 Heap* heap = map->GetHeap();
1618 int object_size = obj->Size();
1619 DCHECK(map->instance_type() == CODE_TYPE);
1620 Code* code_obj = Code::cast(obj);
1621 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1623 ObjectStatsVisitBase(kVisitCode, map, obj);
1629 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1630 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1632 static inline void Visit(Map* map, HeapObject* obj) {
1633 Heap* heap = map->GetHeap();
1634 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1635 if (sfi->scope_info() != heap->empty_fixed_array()) {
1636 heap->RecordFixedArraySubTypeStats(
1637 SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
1639 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1645 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1646 MarkCompactMarkingVisitor::kVisitFixedArray> {
1648 static inline void Visit(Map* map, HeapObject* obj) {
1649 Heap* heap = map->GetHeap();
1650 FixedArray* fixed_array = FixedArray::cast(obj);
1651 if (fixed_array == heap->string_table()) {
1652 heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
1653 fixed_array->Size());
1655 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1660 void MarkCompactMarkingVisitor::Initialize() {
1661 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1663 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1665 if (FLAG_track_gc_object_stats) {
1666 // Copy the visitor table to make call-through possible.
1667 non_count_table_.CopyFrom(&table_);
1668 #define VISITOR_ID_COUNT_FUNCTION(id) \
1669 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1670 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1671 #undef VISITOR_ID_COUNT_FUNCTION
1676 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1677 MarkCompactMarkingVisitor::non_count_table_;
1680 class CodeMarkingVisitor : public ThreadVisitor {
1682 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1683 : collector_(collector) {}
1685 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1686 collector_->PrepareThreadForCodeFlushing(isolate, top);
1690 MarkCompactCollector* collector_;
1694 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1696 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1697 : collector_(collector) {}
1699 void VisitPointers(Object** start, Object** end) {
1700 for (Object** p = start; p < end; p++) VisitPointer(p);
1703 void VisitPointer(Object** slot) {
1704 Object* obj = *slot;
1705 if (obj->IsSharedFunctionInfo()) {
1706 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1707 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1708 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1709 collector_->MarkObject(shared->code(), code_mark);
1710 collector_->MarkObject(shared, shared_mark);
1715 MarkCompactCollector* collector_;
1719 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1720 ThreadLocalTop* top) {
1721 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1722 // Note: for the frame that has a pending lazy deoptimization,
1723 // StackFrame::unchecked_code will return a non-optimized code object for
1724 // the outermost function, and StackFrame::LookupCode will return the
1725 // actual optimized code object.
1726 StackFrame* frame = it.frame();
1727 Code* code = frame->unchecked_code();
1728 MarkBit code_mark = Marking::MarkBitFrom(code);
1729 MarkObject(code, code_mark);
1730 if (frame->is_optimized()) {
1731 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1732 frame->LookupCode());
1738 void MarkCompactCollector::PrepareForCodeFlushing() {
1739 // Enable code flushing for non-incremental cycles.
1740 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1741 EnableCodeFlushing(!was_marked_incrementally_);
1744 // If code flushing is disabled, there is no need to prepare for it.
1745 if (!is_code_flushing_enabled()) return;
1747 // Ensure that the empty descriptor array is marked. The MarkDescriptorArray
1748 // method relies on it being marked before any other descriptor array.
1749 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1750 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1751 MarkObject(descriptor_array, descriptor_array_mark);
1753 // Make sure we are not referencing the code from the stack.
1754 DCHECK(this == heap()->mark_compact_collector());
1755 PrepareThreadForCodeFlushing(heap()->isolate(),
1756 heap()->isolate()->thread_local_top());
1758 // Iterate the archived stacks in all threads to check if
1759 // the code is referenced.
1760 CodeMarkingVisitor code_marking_visitor(this);
1761 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1762 &code_marking_visitor);
1764 SharedFunctionInfoMarkingVisitor visitor(this);
1765 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1766 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1768 ProcessMarkingDeque();
1772 // Visitor class for marking heap roots.
1773 class RootMarkingVisitor : public ObjectVisitor {
1775 explicit RootMarkingVisitor(Heap* heap)
1776 : collector_(heap->mark_compact_collector()) {}
1778 void VisitPointer(Object** p) { MarkObjectByPointer(p); }
1780 void VisitPointers(Object** start, Object** end) {
1781 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1784 // Skip the weak next code link in a code object, which is visited in
1785 // ProcessTopOptimizedFrame.
1786 void VisitNextCodeLink(Object** p) {}
1789 void MarkObjectByPointer(Object** p) {
1790 if (!(*p)->IsHeapObject()) return;
1792 // Replace flat cons strings in place.
1793 HeapObject* object = ShortCircuitConsString(p);
1794 MarkBit mark_bit = Marking::MarkBitFrom(object);
1795 if (mark_bit.Get()) return;
1797 Map* map = object->map();
1799 collector_->SetMark(object, mark_bit);
1801 // Mark the map pointer and body, and push them on the marking stack.
1802 MarkBit map_mark = Marking::MarkBitFrom(map);
1803 collector_->MarkObject(map, map_mark);
1804 MarkCompactMarkingVisitor::IterateBody(map, object);
1806 // Mark all the objects reachable from the map and body. May leave
1807 // overflowed objects in the heap.
1808 collector_->EmptyMarkingDeque();
1811 MarkCompactCollector* collector_;
1815 // Helper class for pruning the string table.
1816 template <bool finalize_external_strings>
1817 class StringTableCleaner : public ObjectVisitor {
1819 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1821 virtual void VisitPointers(Object** start, Object** end) {
1822 // Visit all HeapObject pointers in [start, end).
1823 for (Object** p = start; p < end; p++) {
1825 if (o->IsHeapObject() &&
1826 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1827 if (finalize_external_strings) {
1828 DCHECK(o->IsExternalString());
1829 heap_->FinalizeExternalString(String::cast(*p));
1831 pointers_removed_++;
1833 // Set the entry to the_hole_value (as deleted).
1834 *p = heap_->the_hole_value();
1839 int PointersRemoved() {
1840 DCHECK(!finalize_external_strings);
1841 return pointers_removed_;
1846 int pointers_removed_;
1850 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1851 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1854 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects are considered live.
1856 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1858 virtual Object* RetainAs(Object* object) {
1859 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1861 } else if (object->IsAllocationSite() &&
1862 !(AllocationSite::cast(object)->IsZombie())) {
1863 // "dead" AllocationSites need to live long enough for a traversal of new
1864 // space. These sites get a one-time reprieve.
1865 AllocationSite* site = AllocationSite::cast(object);
1867 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1876 // Fill the marking stack with overflowed objects returned by the given
1877 // iterator. Stop when the marking stack is filled or the end of the space
1878 // is reached, whichever comes first.
1880 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1881 MarkingDeque* marking_deque,
1883 // The caller should ensure that the marking stack is initially not full,
1884 // so that we don't waste effort pointlessly scanning for objects.
1885 DCHECK(!marking_deque->IsFull());
1887 Map* filler_map = heap->one_pointer_filler_map();
1888 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1889 MarkBit markbit = Marking::MarkBitFrom(object);
1890 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1891 Marking::GreyToBlack(markbit);
1892 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1893 marking_deque->PushBlack(object);
1894 if (marking_deque->IsFull()) return;
1900 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
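// MarkWordToObjectStarts (declared above, defined further down in this file)
// decodes one cell of mark bits into the word offsets at which marked objects
// start: it fills |starts| and returns the number of offsets written, as its
// uses in EvacuateLiveObjectsFromPage() and SweepPrecisely() below show.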
1903 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1905 DCHECK(!marking_deque->IsFull());
1906 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1907 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1908 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1909 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
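  // Illustration (derived from the bit patterns asserted above, not an extra
  // mechanism): every object owns two consecutive mark bits, so a grey object
  // contributes the pair "11" starting at its first mark bit while a black
  // object contributes "10". ANDing a cell with itself shifted right by one
  // therefore leaves exactly one set bit per grey object, at the object's
  // start position. For example:
  //   current_cell      = ...01100010  (grey at offset 5, black at offset 1)
  //   current_cell >> 1 = ...00110001
  //   bitwise AND       = ...00100000  (only the grey start bit survives)
  // The next_cell term below covers a grey pair that straddles a cell
  // boundary.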
1911 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1912 Address cell_base = it.CurrentCellBase();
1913 MarkBit::CellType* cell = it.CurrentCell();
1915 const MarkBit::CellType current_cell = *cell;
1916 if (current_cell == 0) continue;
1918 MarkBit::CellType grey_objects;
1920 const MarkBit::CellType next_cell = *(cell + 1);
1921 grey_objects = current_cell & ((current_cell >> 1) |
1922 (next_cell << (Bitmap::kBitsPerCell - 1)));
1924 grey_objects = current_cell & (current_cell >> 1);
1928 while (grey_objects != 0) {
1929 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1930 grey_objects >>= trailing_zeros;
1931 offset += trailing_zeros;
1932 MarkBit markbit(cell, 1 << offset, false);
1933 DCHECK(Marking::IsGrey(markbit));
1934 Marking::GreyToBlack(markbit);
1935 Address addr = cell_base + offset * kPointerSize;
1936 HeapObject* object = HeapObject::FromAddress(addr);
1937 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1938 marking_deque->PushBlack(object);
1939 if (marking_deque->IsFull()) return;
1944 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1949 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1950 NewSpace* new_space, NewSpacePage* p) {
1951 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1952 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1953 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1954 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
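  // Note on the loop below (an observation about the code, not new behaviour):
  // marking has already completed, so every live object on this new space page
  // is black ("10") and no grey ("11") objects remain. Each set bit in a cell
  // is therefore the start bit of a live object, which is why the loop can
  // treat every set bit as an object start without checking the second mark
  // bit.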
1956 MarkBit::CellType* cells = p->markbits()->cells();
1957 int survivors_size = 0;
1959 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1960 Address cell_base = it.CurrentCellBase();
1961 MarkBit::CellType* cell = it.CurrentCell();
1963 MarkBit::CellType current_cell = *cell;
1964 if (current_cell == 0) continue;
1967 while (current_cell != 0) {
1968 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
1969 current_cell >>= trailing_zeros;
1970 offset += trailing_zeros;
1971 Address address = cell_base + offset * kPointerSize;
1972 HeapObject* object = HeapObject::FromAddress(address);
1974 int size = object->Size();
1975 survivors_size += size;
1977 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
1982 // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1983 if (heap()->ShouldBePromoted(object->address(), size) &&
1984 TryPromoteObject(object, size)) {
1988 AllocationResult allocation = new_space->AllocateRaw(size);
1989 if (allocation.IsRetry()) {
1990 if (!new_space->AddFreshPage()) {
1991 // Shouldn't happen. We are sweeping linearly, and to-space
1992 // has the same number of pages as from-space, so there is always room.
1996 allocation = new_space->AllocateRaw(size);
1997 DCHECK(!allocation.IsRetry());
1999 Object* target = allocation.ToObjectChecked();
2001 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
2002 heap()->IncrementSemiSpaceCopiedObjectSize(size);
2006 return survivors_size;
2010 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
2011 PagedSpace* space) {
2012 PageIterator it(space);
2013 while (it.has_next()) {
2014 Page* p = it.next();
2015 DiscoverGreyObjectsOnPage(marking_deque, p);
2016 if (marking_deque->IsFull()) return;
2021 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2022 MarkingDeque* marking_deque) {
2023 NewSpace* space = heap->new_space();
2024 NewSpacePageIterator it(space->bottom(), space->top());
2025 while (it.has_next()) {
2026 NewSpacePage* page = it.next();
2027 DiscoverGreyObjectsOnPage(marking_deque, page);
2028 if (marking_deque->IsFull()) return;
2033 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2035 if (!o->IsHeapObject()) return false;
2036 HeapObject* heap_object = HeapObject::cast(o);
2037 MarkBit mark = Marking::MarkBitFrom(heap_object);
2042 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2045 DCHECK(o->IsHeapObject());
2046 HeapObject* heap_object = HeapObject::cast(o);
2047 MarkBit mark = Marking::MarkBitFrom(heap_object);
2052 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2053 StringTable* string_table = heap()->string_table();
2054 // Mark the string table itself.
2055 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2056 if (!string_table_mark.Get()) {
2057 // String table could have already been marked by visiting the handles list.
2058 SetMark(string_table, string_table_mark);
2060 // Explicitly mark the prefix.
2061 string_table->IteratePrefix(visitor);
2062 ProcessMarkingDeque();
2066 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2067 MarkBit mark_bit = Marking::MarkBitFrom(site);
2068 SetMark(site, mark_bit);
2072 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2073 // Mark the heap roots including global variables, stack variables,
2074 // etc., and all objects reachable from them.
2075 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2077 // Handle the string table specially.
2078 MarkStringTable(visitor);
2080 MarkWeakObjectToCodeTable();
2082 // There may be overflowed objects in the heap. Visit them now.
2083 while (marking_deque_.overflowed()) {
2084 RefillMarkingDeque();
2085 EmptyMarkingDeque();
2090 void MarkCompactCollector::MarkImplicitRefGroups() {
2091 List<ImplicitRefGroup*>* ref_groups =
2092 isolate()->global_handles()->implicit_ref_groups();
2095 for (int i = 0; i < ref_groups->length(); i++) {
2096 ImplicitRefGroup* entry = ref_groups->at(i);
2097 DCHECK(entry != NULL);
2099 if (!IsMarked(*entry->parent)) {
2100 (*ref_groups)[last++] = entry;
2104 Object*** children = entry->children;
2105 // A parent object is marked, so mark all child heap objects.
2106 for (size_t j = 0; j < entry->length; ++j) {
2107 if ((*children[j])->IsHeapObject()) {
2108 HeapObject* child = HeapObject::cast(*children[j]);
2109 MarkBit mark = Marking::MarkBitFrom(child);
2110 MarkObject(child, mark);
2114 // Once the entire group has been marked, dispose it because it's
2115 // not needed anymore.
2118 ref_groups->Rewind(last);
2122 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2123 HeapObject* weak_object_to_code_table =
2124 HeapObject::cast(heap()->weak_object_to_code_table());
2125 if (!IsMarked(weak_object_to_code_table)) {
2126 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2127 SetMark(weak_object_to_code_table, mark);
2132 // Mark all objects reachable from the objects on the marking stack.
2133 // Before: the marking stack contains zero or more heap object pointers.
2134 // After: the marking stack is empty, and all objects reachable from the
2135 // marking stack have been marked, or are overflowed in the heap.
2136 void MarkCompactCollector::EmptyMarkingDeque() {
2137 while (!marking_deque_.IsEmpty()) {
2138 HeapObject* object = marking_deque_.Pop();
2139 DCHECK(object->IsHeapObject());
2140 DCHECK(heap()->Contains(object));
2141 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2143 Map* map = object->map();
2144 MarkBit map_mark = Marking::MarkBitFrom(map);
2145 MarkObject(map, map_mark);
2147 MarkCompactMarkingVisitor::IterateBody(map, object);
2152 // Sweep the heap for overflowed objects, clear their overflow bits, and
2153 // push them on the marking stack. Stop early if the marking stack fills
2154 // before sweeping completes. If sweeping completes, there are no remaining
2155 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
2157 void MarkCompactCollector::RefillMarkingDeque() {
2158 DCHECK(marking_deque_.overflowed());
2160 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2161 if (marking_deque_.IsFull()) return;
2163 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2164 heap()->old_pointer_space());
2165 if (marking_deque_.IsFull()) return;
2167 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2168 if (marking_deque_.IsFull()) return;
2170 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2171 if (marking_deque_.IsFull()) return;
2173 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2174 if (marking_deque_.IsFull()) return;
2176 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2177 if (marking_deque_.IsFull()) return;
2179 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2180 heap()->property_cell_space());
2181 if (marking_deque_.IsFull()) return;
2183 LargeObjectIterator lo_it(heap()->lo_space());
2184 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
2185 if (marking_deque_.IsFull()) return;
2187 marking_deque_.ClearOverflowed();
2191 // Mark all objects reachable (transitively) from objects on the marking
2192 // stack. Before: the marking stack contains zero or more heap object
2193 // pointers. After: the marking stack is empty and there are no overflowed
2194 // objects in the heap.
2195 void MarkCompactCollector::ProcessMarkingDeque() {
2196 EmptyMarkingDeque();
2197 while (marking_deque_.overflowed()) {
2198 RefillMarkingDeque();
2199 EmptyMarkingDeque();
2204 // Mark all objects reachable (transitively) from objects on the marking
2205 // stack including references only considered in the atomic marking pause.
2206 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2207 bool work_to_do = true;
2208 DCHECK(marking_deque_.IsEmpty());
2209 while (work_to_do) {
2210 isolate()->global_handles()->IterateObjectGroups(
2211 visitor, &IsUnmarkedHeapObjectWithHeap);
2212 MarkImplicitRefGroups();
2213 ProcessWeakCollections();
2214 work_to_do = !marking_deque_.IsEmpty();
2215 ProcessMarkingDeque();
2220 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2221 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2222 !it.done(); it.Advance()) {
2223 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2226 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2227 Code* code = it.frame()->LookupCode();
2228 if (!code->CanDeoptAt(it.frame()->pc())) {
2229 code->CodeIterateBody(visitor);
2231 ProcessMarkingDeque();
2238 void MarkCompactCollector::MarkLiveObjects() {
2239 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2240 double start_time = 0.0;
2241 if (FLAG_print_cumulative_gc_stat) {
2242 start_time = base::OS::TimeCurrentMillis();
2244 // The recursive GC marker detects when it is nearing stack overflow,
2245 // and switches to a different marking system. JS interrupts interfere
2246 // with the C stack limit check.
2247 PostponeInterruptsScope postpone(isolate());
2249 bool incremental_marking_overflowed = false;
2250 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2251 if (was_marked_incrementally_) {
2252 // Finalize the incremental marking and check whether we had an overflow.
2253 // Both markers use the grey color to mark overflowed objects, so the
2254 // non-incremental marker can deal with them as if the overflow had
2255 // occurred during normal marking. But the incremental marker uses a
2256 // separate marking deque, so we have to explicitly copy its overflow
2257 // state.
2258 incremental_marking->Finalize();
2259 incremental_marking_overflowed =
2260 incremental_marking->marking_deque()->overflowed();
2261 incremental_marking->marking_deque()->ClearOverflowed();
2263 // Abort any pending incremental activities, e.g. incremental sweeping.
2264 incremental_marking->Abort();
2268 DCHECK(state_ == PREPARE_GC);
2269 state_ = MARK_LIVE_OBJECTS;
2271 // The to space contains live objects, a page in from space is used as a marking stack.
2273 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2274 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2275 if (FLAG_force_marking_deque_overflows) {
2276 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2278 marking_deque_.Initialize(marking_deque_start, marking_deque_end);
2279 DCHECK(!marking_deque_.overflowed());
2281 if (incremental_marking_overflowed) {
2282 // There are overflowed objects left in the heap after incremental marking.
2283 marking_deque_.SetOverflowed();
2286 PrepareForCodeFlushing();
2288 if (was_marked_incrementally_) {
2289 // There is no write barrier on cells so we have to scan them now at the end
2290 // of the incremental marking.
2292 HeapObjectIterator cell_iterator(heap()->cell_space());
2294 while ((cell = cell_iterator.Next()) != NULL) {
2295 DCHECK(cell->IsCell());
2296 if (IsMarked(cell)) {
2297 int offset = Cell::kValueOffset;
2298 MarkCompactMarkingVisitor::VisitPointer(
2299 heap(), reinterpret_cast<Object**>(cell->address() + offset));
2304 HeapObjectIterator js_global_property_cell_iterator(
2305 heap()->property_cell_space());
2307 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2308 DCHECK(cell->IsPropertyCell());
2309 if (IsMarked(cell)) {
2310 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2316 RootMarkingVisitor root_visitor(heap());
2317 MarkRoots(&root_visitor);
2319 ProcessTopOptimizedFrame(&root_visitor);
2321 // The objects reachable from the roots are marked, yet unreachable
2322 // objects are unmarked. Mark objects reachable due to host
2323 // application specific logic or through Harmony weak maps.
2324 ProcessEphemeralMarking(&root_visitor);
2326 // The objects reachable from the roots, weak maps or object groups
2327 // are marked, yet unreachable objects are unmarked. Mark objects
2328 // reachable only from weak global handles.
2330 // First we identify nonlive weak handles and mark them as pending destruction.
2332 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2333 &IsUnmarkedHeapObject);
2334 // Then we mark the objects and process the transitive closure.
2335 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2336 while (marking_deque_.overflowed()) {
2337 RefillMarkingDeque();
2338 EmptyMarkingDeque();
2341 // Repeat host application specific and Harmony weak maps marking to
2342 // mark unmarked objects reachable from the weak roots.
2343 ProcessEphemeralMarking(&root_visitor);
2347 if (FLAG_print_cumulative_gc_stat) {
2348 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2353 void MarkCompactCollector::AfterMarking() {
2354 // Object literal map caches reference strings (cache keys) and maps
2355 // (cache values). At this point still useful maps have already been
2356 // marked. Mark the keys for the alive values before we process the string table.
2360 // Prune the string table removing all strings only pointed to by the
2361 // string table. Cannot use string_table() here because the string table is marked.
2363 StringTable* string_table = heap()->string_table();
2364 InternalizedStringTableCleaner internalized_visitor(heap());
2365 string_table->IterateElements(&internalized_visitor);
2366 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2368 ExternalStringTableCleaner external_visitor(heap());
2369 heap()->external_string_table_.Iterate(&external_visitor);
2370 heap()->external_string_table_.CleanUp();
2372 // Process the weak references.
2373 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2374 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2376 // Remove object groups after marking phase.
2377 heap()->isolate()->global_handles()->RemoveObjectGroups();
2378 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2380 // Flush code from collected candidates.
2381 if (is_code_flushing_enabled()) {
2382 code_flusher_->ProcessCandidates();
2383 // If incremental marker does not support code flushing, we need to
2384 // disable it before incremental marking steps for the next cycle.
2385 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2386 EnableCodeFlushing(false);
2390 if (FLAG_track_gc_object_stats) {
2391 heap()->CheckpointObjectStats();
2396 void MarkCompactCollector::ProcessMapCaches() {
2397 Object* raw_context = heap()->native_contexts_list();
2398 while (raw_context != heap()->undefined_value()) {
2399 Context* context = reinterpret_cast<Context*>(raw_context);
2400 if (IsMarked(context)) {
2401 HeapObject* raw_map_cache =
2402 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2403 // A map cache may be reachable from the stack. In this case
2404 // it's already transitively marked and it's too late to clean it up.
2406 if (!IsMarked(raw_map_cache) &&
2407 raw_map_cache != heap()->undefined_value()) {
2408 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2409 int existing_elements = map_cache->NumberOfElements();
2410 int used_elements = 0;
2411 for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
2412 i += MapCache::kEntrySize) {
2413 Object* raw_key = map_cache->get(i);
2414 if (raw_key == heap()->undefined_value() ||
2415 raw_key == heap()->the_hole_value())
2417 STATIC_ASSERT(MapCache::kEntrySize == 2);
2418 Object* raw_map = map_cache->get(i + 1);
2419 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2422 // Delete useless entries with unmarked maps.
2423 DCHECK(raw_map->IsMap());
2424 map_cache->set_the_hole(i);
2425 map_cache->set_the_hole(i + 1);
2428 if (used_elements == 0) {
2429 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2431 // Note: we don't actually shrink the cache here to avoid
2432 // extra complexity during GC. We rely on subsequent cache
2433 // usages (EnsureCapacity) to do this.
2434 map_cache->ElementsRemoved(existing_elements - used_elements);
2435 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2436 MarkObject(map_cache, map_cache_markbit);
2440 // Move to next element in the list.
2441 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2443 ProcessMarkingDeque();
2447 void MarkCompactCollector::ClearNonLiveReferences() {
2448 // Iterate over the map space, setting map transitions that go from
2449 // a marked map to an unmarked map to null transitions. This action
2450 // is carried out only on maps of JSObjects and related subtypes.
2451 HeapObjectIterator map_iterator(heap()->map_space());
2452 for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2453 obj = map_iterator.Next()) {
2454 Map* map = Map::cast(obj);
2456 if (!map->CanTransition()) continue;
2458 MarkBit map_mark = Marking::MarkBitFrom(map);
2459 ClearNonLivePrototypeTransitions(map);
2460 ClearNonLiveMapTransitions(map, map_mark);
2462 if (map_mark.Get()) {
2463 ClearNonLiveDependentCode(map->dependent_code());
2465 ClearDependentCode(map->dependent_code());
2466 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2470 // Iterate over property cell space, removing dependent code that is not
2471 // otherwise kept alive by strong references.
2472 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2473 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
2474 cell = cell_iterator.Next()) {
2475 if (IsMarked(cell)) {
2476 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2480 // Iterate over allocation sites, removing dependent code that is not
2481 // otherwise kept alive by strong references.
2482 Object* undefined = heap()->undefined_value();
2483 for (Object* site = heap()->allocation_sites_list(); site != undefined;
2484 site = AllocationSite::cast(site)->weak_next()) {
2485 if (IsMarked(site)) {
2486 ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2490 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2491 WeakHashTable* table =
2492 WeakHashTable::cast(heap_->weak_object_to_code_table());
2493 uint32_t capacity = table->Capacity();
2494 for (uint32_t i = 0; i < capacity; i++) {
2495 uint32_t key_index = table->EntryToIndex(i);
2496 Object* key = table->get(key_index);
2497 if (!table->IsKey(key)) continue;
2498 uint32_t value_index = table->EntryToValueIndex(i);
2499 Object* value = table->get(value_index);
2500 if (key->IsCell() && !IsMarked(key)) {
2501 Cell* cell = Cell::cast(key);
2502 Object* object = cell->value();
2503 if (IsMarked(object)) {
2504 MarkBit mark = Marking::MarkBitFrom(cell);
2505 SetMark(cell, mark);
2506 Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2507 RecordSlot(value_slot, value_slot, *value_slot);
2510 if (IsMarked(key)) {
2511 if (!IsMarked(value)) {
2512 HeapObject* obj = HeapObject::cast(value);
2513 MarkBit mark = Marking::MarkBitFrom(obj);
2516 ClearNonLiveDependentCode(DependentCode::cast(value));
2518 ClearDependentCode(DependentCode::cast(value));
2519 table->set(key_index, heap_->the_hole_value());
2520 table->set(value_index, heap_->the_hole_value());
2521 table->ElementRemoved();
2528 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2529 int number_of_transitions = map->NumberOfProtoTransitions();
2530 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2532 int new_number_of_transitions = 0;
2533 const int header = Map::kProtoTransitionHeaderSize;
2534 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2535 const int map_offset = header + Map::kProtoTransitionMapOffset;
2536 const int step = Map::kProtoTransitionElementsPerEntry;
2537 for (int i = 0; i < number_of_transitions; i++) {
2538 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2539 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2540 if (IsMarked(prototype) && IsMarked(cached_map)) {
2541 DCHECK(!prototype->IsUndefined());
2542 int proto_index = proto_offset + new_number_of_transitions * step;
2543 int map_index = map_offset + new_number_of_transitions * step;
2544 if (new_number_of_transitions != i) {
2545 prototype_transitions->set(proto_index, prototype,
2546 UPDATE_WRITE_BARRIER);
2547 prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
2549 Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2550 RecordSlot(slot, slot, prototype);
2551 new_number_of_transitions++;
2555 if (new_number_of_transitions != number_of_transitions) {
2556 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2559 // Fill slots that became free with undefined value.
2560 for (int i = new_number_of_transitions * step;
2561 i < number_of_transitions * step; i++) {
2562 prototype_transitions->set_undefined(header + i);
2567 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2569 Object* potential_parent = map->GetBackPointer();
2570 if (!potential_parent->IsMap()) return;
2571 Map* parent = Map::cast(potential_parent);
2573 // Follow back pointer, check whether we are dealing with a map transition
2574 // from a live map to a dead path and, if so, clear the transitions of the parent.
2575 bool current_is_alive = map_mark.Get();
2576 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2577 if (!current_is_alive && parent_is_alive) {
2578 ClearMapTransitions(parent);
2583 // Clear a possible back pointer in case the transition leads to a dead map.
2584 // Returns true if a back pointer was cleared and false otherwise.
2585 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2586 if (Marking::MarkBitFrom(target).Get()) return false;
2587 target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2592 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2593 // If there are no transitions to be cleared, return.
2594 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2595 // properly cleared.
2596 if (!map->HasTransitionArray()) return;
2598 TransitionArray* t = map->transitions();
2600 int transition_index = 0;
2602 DescriptorArray* descriptors = map->instance_descriptors();
2603 bool descriptors_owner_died = false;
2605 // Compact all live transitions to the left.
2606 for (int i = 0; i < t->number_of_transitions(); ++i) {
2607 Map* target = t->GetTarget(i);
2608 if (ClearMapBackPointer(target)) {
2609 if (target->instance_descriptors() == descriptors) {
2610 descriptors_owner_died = true;
2613 if (i != transition_index) {
2614 Name* key = t->GetKey(i);
2615 t->SetKey(transition_index, key);
2616 Object** key_slot = t->GetKeySlot(transition_index);
2617 RecordSlot(key_slot, key_slot, key);
2618 // Target slots do not need to be recorded since maps are not compacted.
2619 t->SetTarget(transition_index, t->GetTarget(i));
2625 // If there are no transitions to be cleared, return.
2626 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2627 // properly cleared.
2628 if (transition_index == t->number_of_transitions()) return;
2630 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2632 if (descriptors_owner_died) {
2633 if (number_of_own_descriptors > 0) {
2634 TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2635 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2636 map->set_owns_descriptors(true);
2638 DCHECK(descriptors == heap_->empty_descriptor_array());
2642 // Note that we never eliminate a transition array, though we might right-trim
2643 // such that number_of_transitions() == 0. If this assumption changes,
2644 // TransitionArray::CopyInsert() will need to deal with the case that a
2645 // transition array disappeared during GC.
2646 int trim = t->number_of_transitions() - transition_index;
2648 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2649 t, t->IsSimpleTransition() ? trim
2650 : trim * TransitionArray::kTransitionSize);
2652 DCHECK(map->HasTransitionArray());
2656 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2657 DescriptorArray* descriptors,
2658 int number_of_own_descriptors) {
2659 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2660 int to_trim = number_of_descriptors - number_of_own_descriptors;
2661 if (to_trim == 0) return;
2663 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2664 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2665 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2667 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2668 descriptors->Sort();
2672 void MarkCompactCollector::TrimEnumCache(Map* map,
2673 DescriptorArray* descriptors) {
2674 int live_enum = map->EnumLength();
2675 if (live_enum == kInvalidEnumCacheSentinel) {
2676 live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2678 if (live_enum == 0) return descriptors->ClearEnumCache();
2680 FixedArray* enum_cache = descriptors->GetEnumCache();
2682 int to_trim = enum_cache->length() - live_enum;
2683 if (to_trim <= 0) return;
2684 heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
2687 if (!descriptors->HasEnumIndicesCache()) return;
2688 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2689 heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
2693 void MarkCompactCollector::ClearDependentICList(Object* head) {
2694 Object* current = head;
2695 Object* undefined = heap()->undefined_value();
2696 while (current != undefined) {
2697 Code* code = Code::cast(current);
2698 if (IsMarked(code)) {
2699 DCHECK(code->is_weak_stub());
2700 IC::InvalidateMaps(code);
2702 current = code->next_code_link();
2703 code->set_next_code_link(undefined);
2708 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
2709 DisallowHeapAllocation no_allocation;
2710 DependentCode::GroupStartIndexes starts(entries);
2711 int number_of_entries = starts.number_of_entries();
2712 if (number_of_entries == 0) return;
2713 int g = DependentCode::kWeakICGroup;
2714 if (starts.at(g) != starts.at(g + 1)) {
2715 int i = starts.at(g);
2716 DCHECK(i + 1 == starts.at(g + 1));
2717 Object* head = entries->object_at(i);
2718 ClearDependentICList(head);
2720 g = DependentCode::kWeakCodeGroup;
2721 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2722 // If the entry is compilation info then the map must be alive,
2723 // and ClearDependentCode shouldn't be called.
2724 DCHECK(entries->is_code_at(i));
2725 Code* code = entries->code_at(i);
2726 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2727 code->set_marked_for_deoptimization(true);
2728 code->InvalidateEmbeddedObjects();
2729 have_code_to_deoptimize_ = true;
2732 for (int i = 0; i < number_of_entries; i++) {
2733 entries->clear_at(i);
2738 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2739 DependentCode* entries, int group, int start, int end, int new_start) {
2741 if (group == DependentCode::kWeakICGroup) {
2742 // Dependent weak IC stubs form a linked list and only the head is stored
2743 // in the dependent code array.
2745 DCHECK(start + 1 == end);
2746 Object* old_head = entries->object_at(start);
2747 MarkCompactWeakObjectRetainer retainer;
2748 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2749 entries->set_object_at(new_start, head);
2750 Object** slot = entries->slot_at(new_start);
2751 RecordSlot(slot, slot, head);
2752 // We do not compact this group even if the head is undefined,
2753 // since more dependent ICs are likely to be added later.
2757 for (int i = start; i < end; i++) {
2758 Object* obj = entries->object_at(i);
2759 DCHECK(obj->IsCode() || IsMarked(obj));
2760 if (IsMarked(obj) &&
2761 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2762 if (new_start + survived != i) {
2763 entries->set_object_at(new_start + survived, obj);
2765 Object** slot = entries->slot_at(new_start + survived);
2766 RecordSlot(slot, slot, obj);
2771 entries->set_number_of_entries(
2772 static_cast<DependentCode::DependencyGroup>(group), survived);
2777 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2778 DisallowHeapAllocation no_allocation;
2779 DependentCode::GroupStartIndexes starts(entries);
2780 int number_of_entries = starts.number_of_entries();
2781 if (number_of_entries == 0) return;
2782 int new_number_of_entries = 0;
2783 // Go through all groups, remove dead code objects, and compact.
2784 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2785 int survived = ClearNonLiveDependentCodeInGroup(
2786 entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2787 new_number_of_entries += survived;
2789 for (int i = new_number_of_entries; i < number_of_entries; i++) {
2790 entries->clear_at(i);
2795 void MarkCompactCollector::ProcessWeakCollections() {
2796 GCTracer::Scope gc_scope(heap()->tracer(),
2797 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2798 Object* weak_collection_obj = heap()->encountered_weak_collections();
2799 while (weak_collection_obj != Smi::FromInt(0)) {
2800 JSWeakCollection* weak_collection =
2801 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2802 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2803 if (weak_collection->table()->IsHashTable()) {
2804 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2805 Object** anchor = reinterpret_cast<Object**>(table->address());
2806 for (int i = 0; i < table->Capacity(); i++) {
2807 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2809 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2810 RecordSlot(anchor, key_slot, *key_slot);
2811 Object** value_slot =
2812 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2813 MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2818 weak_collection_obj = weak_collection->next();
2823 void MarkCompactCollector::ClearWeakCollections() {
2824 GCTracer::Scope gc_scope(heap()->tracer(),
2825 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2826 Object* weak_collection_obj = heap()->encountered_weak_collections();
2827 while (weak_collection_obj != Smi::FromInt(0)) {
2828 JSWeakCollection* weak_collection =
2829 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2830 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2831 if (weak_collection->table()->IsHashTable()) {
2832 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2833 for (int i = 0; i < table->Capacity(); i++) {
2834 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2835 if (!MarkCompactCollector::IsMarked(key)) {
2836 table->RemoveEntry(i);
2840 weak_collection_obj = weak_collection->next();
2841 weak_collection->set_next(heap()->undefined_value());
2843 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2847 void MarkCompactCollector::AbortWeakCollections() {
2848 GCTracer::Scope gc_scope(heap()->tracer(),
2849 GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
2850 Object* weak_collection_obj = heap()->encountered_weak_collections();
2851 while (weak_collection_obj != Smi::FromInt(0)) {
2852 JSWeakCollection* weak_collection =
2853 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2854 weak_collection_obj = weak_collection->next();
2855 weak_collection->set_next(heap()->undefined_value());
2857 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2861 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
2862 if (heap_->InNewSpace(value)) {
2863 heap_->store_buffer()->Mark(slot);
2864 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2865 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2866 reinterpret_cast<Object**>(slot),
2867 SlotsBuffer::IGNORE_OVERFLOW);
2872 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2875 // The first pass migrates all alive objects from one semispace to another or
2876 // promotes them to old space. The forwarding address is written directly
2877 // into the first word of the object without any encoding. If the object is
2878 // dead we write NULL as the forwarding address.
2880 // The second pass updates pointers to new space in all spaces. It is possible
2881 // to encounter pointers to dead new space objects during traversal of pointers
2883 // to new space. We should clear them to avoid encountering them during the next
2883 // pointer iteration. This is an issue if the store buffer overflows and we
2884 // have to scan the entire old space, including dead objects, looking for
2885 // pointers to new space.
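// Sketch of the forwarding encoding relied on below (it follows from the code
// in this file rather than being an additional mechanism): after
// MigrateObject() has run, Memory::Address_at(src_addr) holds dst_addr, i.e.
// the first word of the old copy is the raw address of the new copy. Dead new
// space objects instead have NULL written into that word by the new space
// sweep, which is exactly what UpdatePointer() below checks before rewriting a
// store buffer entry.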
2886 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2887 int size, AllocationSpace dest) {
2888 Address dst_addr = dst->address();
2889 Address src_addr = src->address();
2890 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2891 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2892 if (dest == OLD_POINTER_SPACE) {
2893 Address src_slot = src_addr;
2894 Address dst_slot = dst_addr;
2895 DCHECK(IsAligned(size, kPointerSize));
2897 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2898 Object* value = Memory::Object_at(src_slot);
2900 Memory::Object_at(dst_slot) = value;
2902 // We special case ConstantPoolArrays below since they could contain
2903 // integer value entries which look like tagged pointers.
2904 // TODO(mstarzinger): restructure this code to avoid this special-casing.
2905 if (!src->IsConstantPoolArray()) {
2906 RecordMigratedSlot(value, dst_slot);
2909 src_slot += kPointerSize;
2910 dst_slot += kPointerSize;
2913 if (compacting_ && dst->IsJSFunction()) {
2914 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2915 Address code_entry = Memory::Address_at(code_entry_slot);
2917 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2918 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2919 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2920 SlotsBuffer::IGNORE_OVERFLOW);
2922 } else if (dst->IsConstantPoolArray()) {
2923 ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2924 ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2925 while (!code_iter.is_finished()) {
2926 Address code_entry_slot =
2927 dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2928 Address code_entry = Memory::Address_at(code_entry_slot);
2930 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2931 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2932 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2933 SlotsBuffer::IGNORE_OVERFLOW);
2936 ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2937 while (!heap_iter.is_finished()) {
2939 dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2940 Object* value = Memory::Object_at(heap_slot);
2941 RecordMigratedSlot(value, heap_slot);
2944 } else if (dest == CODE_SPACE) {
2945 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2946 heap()->MoveBlock(dst_addr, src_addr, size);
2947 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2948 SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
2949 SlotsBuffer::IGNORE_OVERFLOW);
2950 Code::cast(dst)->Relocate(dst_addr - src_addr);
2952 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2953 heap()->MoveBlock(dst_addr, src_addr, size);
2955 heap()->OnMoveEvent(dst, src, size);
2956 Memory::Address_at(src_addr) = dst_addr;
2960 // Visitor for updating pointers from live objects in old spaces to new space.
2961 // It does not expect to encounter pointers to dead objects.
2962 class PointersUpdatingVisitor : public ObjectVisitor {
2964 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2966 void VisitPointer(Object** p) { UpdatePointer(p); }
2968 void VisitPointers(Object** start, Object** end) {
2969 for (Object** p = start; p < end; p++) UpdatePointer(p);
2972 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2973 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2974 Object* target = rinfo->target_object();
2975 Object* old_target = target;
2976 VisitPointer(&target);
2977 // Avoid unnecessary changes that might unnecessarily flush the instruction cache.
2979 if (target != old_target) {
2980 rinfo->set_target_object(target);
2984 void VisitCodeTarget(RelocInfo* rinfo) {
2985 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2986 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2987 Object* old_target = target;
2988 VisitPointer(&target);
2989 if (target != old_target) {
2990 rinfo->set_target_address(Code::cast(target)->instruction_start());
2994 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2995 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2996 Object* stub = rinfo->code_age_stub();
2997 DCHECK(stub != NULL);
2998 VisitPointer(&stub);
2999 if (stub != rinfo->code_age_stub()) {
3000 rinfo->set_code_age_stub(Code::cast(stub));
3004 void VisitDebugTarget(RelocInfo* rinfo) {
3005 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
3006 rinfo->IsPatchedReturnSequence()) ||
3007 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
3008 rinfo->IsPatchedDebugBreakSlotSequence()));
3009 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
3010 VisitPointer(&target);
3011 rinfo->set_call_address(Code::cast(target)->instruction_start());
3014 static inline void UpdateSlot(Heap* heap, Object** slot) {
3015 Object* obj = *slot;
3017 if (!obj->IsHeapObject()) return;
3019 HeapObject* heap_obj = HeapObject::cast(obj);
3021 MapWord map_word = heap_obj->map_word();
3022 if (map_word.IsForwardingAddress()) {
3023 DCHECK(heap->InFromSpace(heap_obj) ||
3024 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
3025 HeapObject* target = map_word.ToForwardingAddress();
3027 DCHECK(!heap->InFromSpace(target) &&
3028 !MarkCompactCollector::IsOnEvacuationCandidate(target));
3033 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
3039 static void UpdatePointer(HeapObject** address, HeapObject* object) {
3040 Address new_addr = Memory::Address_at(object->address());
3042 // The new space sweep will overwrite the map word of dead objects
3043 // with NULL. In this case we do not need to transfer this entry to
3044 // the store buffer which we are rebuilding.
3045 // We perform the pointer update with a no barrier compare-and-swap. The
3046 // compare and swap may fail in the case where the pointer update tries to
3047 // update garbage memory which was concurrently accessed by the sweeper.
3048 if (new_addr != NULL) {
3049 base::NoBarrier_CompareAndSwap(
3050 reinterpret_cast<base::AtomicWord*>(address),
3051 reinterpret_cast<base::AtomicWord>(object),
3052 reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
3057 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3059 MapWord map_word = HeapObject::cast(*p)->map_word();
3061 if (map_word.IsForwardingAddress()) {
3062 return String::cast(map_word.ToForwardingAddress());
3065 return String::cast(*p);
3069 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3071 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3073 OldSpace* target_space = heap()->TargetSpace(object);
3075 DCHECK(target_space == heap()->old_pointer_space() ||
3076 target_space == heap()->old_data_space());
3078 AllocationResult allocation = target_space->AllocateRaw(object_size);
3079 if (allocation.To(&target)) {
3080 MigrateObject(target, object, object_size, target_space->identity());
3081 heap()->IncrementPromotedObjectsSize(object_size);
3089 void MarkCompactCollector::EvacuateNewSpace() {
3090 // There are soft limits in the allocation code, designed to trigger a mark
3091 // sweep collection by failing allocations. But since we are already in
3092 // a mark-sweep allocation, there is no sense in trying to trigger one.
3093 AlwaysAllocateScope scope(isolate());
3095 NewSpace* new_space = heap()->new_space();
3097 // Store allocation range before flipping semispaces.
3098 Address from_bottom = new_space->bottom();
3099 Address from_top = new_space->top();
3101 // Flip the semispaces. After flipping, to space is empty, from space has live objects.
3104 new_space->ResetAllocationInfo();
3106 int survivors_size = 0;
3108 // First pass: traverse all objects in inactive semispace, remove marks,
3109 // migrate live objects and write forwarding addresses. This stage puts
3110 // new entries in the store buffer and may cause some pages to be marked
3111 // scan-on-scavenge.
3112 NewSpacePageIterator it(from_bottom, from_top);
3113 while (it.has_next()) {
3114 NewSpacePage* p = it.next();
3115 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3118 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3119 new_space->set_age_mark(new_space->top());
3123 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3124 AlwaysAllocateScope always_allocate(isolate());
3125 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3126 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3127 p->MarkSweptPrecisely();
3131 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3132 Address cell_base = it.CurrentCellBase();
3133 MarkBit::CellType* cell = it.CurrentCell();
3135 if (*cell == 0) continue;
3137 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3138 for (int i = 0; i < live_objects; i++) {
3139 Address object_addr = cell_base + offsets[i] * kPointerSize;
3140 HeapObject* object = HeapObject::FromAddress(object_addr);
3141 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3143 int size = object->Size();
3145 HeapObject* target_object;
3146 AllocationResult allocation = space->AllocateRaw(size);
3147 if (!allocation.To(&target_object)) {
3148 // If allocation failed, use emergency memory and re-try allocation.
3149 CHECK(space->HasEmergencyMemory());
3150 space->UseEmergencyMemory();
3151 allocation = space->AllocateRaw(size);
3153 if (!allocation.To(&target_object)) {
3154 // OS refused to give us memory.
3155 V8::FatalProcessOutOfMemory("Evacuation");
3159 MigrateObject(target_object, object, size, space->identity());
3160 DCHECK(object->map_word().IsForwardingAddress());
3163 // Clear marking bits for current cell.
3166 p->ResetLiveBytes();
3170 void MarkCompactCollector::EvacuatePages() {
3171 int npages = evacuation_candidates_.length();
3172 for (int i = 0; i < npages; i++) {
3173 Page* p = evacuation_candidates_[i];
3174 DCHECK(p->IsEvacuationCandidate() ||
3175 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3176 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3177 MemoryChunk::SWEEPING_DONE);
3178 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3179 // Allocate emergency memory for the case when compaction fails due to running out of memory.
3181 if (!space->HasEmergencyMemory()) {
3182 space->CreateEmergencyMemory();
3184 if (p->IsEvacuationCandidate()) {
3185 // During compaction we might have to request a new page. Check that we
3186 // have an emergency page and the space still has room for that.
3187 if (space->HasEmergencyMemory() && space->CanExpand()) {
3188 EvacuateLiveObjectsFromPage(p);
3190 // Without room for expansion evacuation is not guaranteed to succeed.
3191 // Pessimistically abandon unevacuated pages.
3192 for (int j = i; j < npages; j++) {
3193 Page* page = evacuation_candidates_[j];
3194 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3195 page->ClearEvacuationCandidate();
3196 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3203 // Release emergency memory.
3204 PagedSpaces spaces(heap());
3205 for (PagedSpace* space = spaces.next(); space != NULL;
3206 space = spaces.next()) {
3207 if (space->HasEmergencyMemory()) {
3208 space->FreeEmergencyMemory();
3215 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3217 virtual Object* RetainAs(Object* object) {
3218 if (object->IsHeapObject()) {
3219 HeapObject* heap_object = HeapObject::cast(object);
3220 MapWord map_word = heap_object->map_word();
3221 if (map_word.IsForwardingAddress()) {
3222 return map_word.ToForwardingAddress();
3230 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3231 SlotsBuffer::SlotType slot_type, Address addr) {
3232 switch (slot_type) {
3233 case SlotsBuffer::CODE_TARGET_SLOT: {
3234 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3235 rinfo.Visit(isolate, v);
3238 case SlotsBuffer::CODE_ENTRY_SLOT: {
3239 v->VisitCodeEntry(addr);
3242 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3243 HeapObject* obj = HeapObject::FromAddress(addr);
3244 Code::cast(obj)->CodeIterateBody(v);
3247 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3248 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3249 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3252 case SlotsBuffer::JS_RETURN_SLOT: {
3253 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3254 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3257 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3258 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3259 rinfo.Visit(isolate, v);
3269 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3272 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3275 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
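// These mode enums are combined as template parameters of SweepPrecisely()
// below. For example, EvacuateNewSpaceAndCandidates() later in this file
// instantiates
//   SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
//                  REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
//                                                     &updating_visitor)
// for code space pages when FLAG_zap_code_space is set.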
3278 template <MarkCompactCollector::SweepingParallelism mode>
3279 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3281 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3282 DCHECK(free_list == NULL);
3283 return space->Free(start, size);
3285 // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3286 return size - free_list->Free(start, size);
3291 // Sweep a space precisely. After this has been done the space can
3292 // be iterated precisely, hitting only the live objects. Code space
3293 // is always swept precisely because we want to be able to iterate
3294 // over it. Map space is swept precisely, because it is not compacted.
3295 // Slots in live objects pointing into evacuation candidates are updated if requested.
3297 // Returns the size of the biggest contiguous freed memory chunk in bytes.
3298 template <SweepingMode sweeping_mode,
3299 MarkCompactCollector::SweepingParallelism parallelism,
3300 SkipListRebuildingMode skip_list_mode,
3301 FreeSpaceTreatmentMode free_space_mode>
3302 static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
3304 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3305 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3306 space->identity() == CODE_SPACE);
3307 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3308 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3309 sweeping_mode == SWEEP_ONLY);
3311 Address free_start = p->area_start();
3312 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
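  // Presumably this alignment holds because one mark-bit cell covers
  // Bitmap::kBitsPerCell (32) words, so page areas start on a cell boundary
  // and every cell_base produced by the iterator below is 32-word aligned.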
3315 SkipList* skip_list = p->skip_list();
3316 int curr_region = -1;
3317 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3321 intptr_t freed_bytes = 0;
3322 intptr_t max_freed_bytes = 0;
3324 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3325 Address cell_base = it.CurrentCellBase();
3326 MarkBit::CellType* cell = it.CurrentCell();
3327 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3329 for (; live_objects != 0; live_objects--) {
3330 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3331 if (free_end != free_start) {
3332 int size = static_cast<int>(free_end - free_start);
3333 if (free_space_mode == ZAP_FREE_SPACE) {
3334 memset(free_start, 0xcc, size);
3336 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3337 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3338 #ifdef ENABLE_GDB_JIT_INTERFACE
3339 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3340 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3344 HeapObject* live_object = HeapObject::FromAddress(free_end);
3345 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3346 Map* map = live_object->map();
3347 int size = live_object->SizeFromMap(map);
3348 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3349 live_object->IterateBody(map->instance_type(), size, v);
3351 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3352 int new_region_start = SkipList::RegionNumber(free_end);
3353 int new_region_end =
3354 SkipList::RegionNumber(free_end + size - kPointerSize);
3355 if (new_region_start != curr_region || new_region_end != curr_region) {
3356 skip_list->AddObject(free_end, size);
3357 curr_region = new_region_end;
3360 free_start = free_end + size;
3362 // Clear marking bits for current cell.
3365 if (free_start != p->area_end()) {
3366 int size = static_cast<int>(p->area_end() - free_start);
3367 if (free_space_mode == ZAP_FREE_SPACE) {
3368 memset(free_start, 0xcc, size);
3370 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3371 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3372 #ifdef ENABLE_GDB_JIT_INTERFACE
3373 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3374 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3378 p->ResetLiveBytes();
3380 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3381 // When concurrent sweeping is active, the page will be marked after
3382 // sweeping by the main thread.
3383 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3385 p->MarkSweptPrecisely();
3387 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3391 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3392 Page* p = Page::FromAddress(code->address());
3394 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3398 Address code_start = code->address();
3399 Address code_end = code_start + code->Size();
3401 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3402 uint32_t end_index =
3403 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3405 Bitmap* b = p->markbits();
3407 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3408 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3410 MarkBit::CellType* start_cell = start_mark_bit.cell();
3411 MarkBit::CellType* end_cell = end_mark_bit.cell();
3414 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3415 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
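  // Worked example (illustrative only): with start_mark_bit.mask() == 0x08 and
  // end_mark_bit.mask() == 0x20,
  //   start_mask = ~(0x08 - 1)     = 0xfffffff8  (bit 3 and everything above)
  //   end_mask   = (0x20 << 1) - 1 = 0x3f        (bit 5 and everything below)
  // so start_mask & end_mask selects exactly bits 3..5, i.e. the mark bits
  // spanned by the code object when it starts and ends in the same cell.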
3417 if (start_cell == end_cell) {
3418 *start_cell |= start_mask & end_mask;
3420 *start_cell |= start_mask;
3421 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3424 *end_cell |= end_mask;
3427 for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
3436 static bool IsOnInvalidatedCodeObject(Address addr) {
3437 // We did not record any slots in large objects, thus
3438 // we can safely go to the page from the slot address.
3439 Page* p = Page::FromAddress(addr);
3441 // First check owner's identity because old pointer and old data spaces
3442 // are swept lazily and might still have non-zero mark-bits on some
3444 if (p->owner()->identity() != CODE_SPACE) return false;
3446 // In code space only bits on evacuation candidates (but we don't record
3447 // any slots on them) and under invalidated code objects are non-zero.
3449 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3451 return mark_bit.Get();
3455 void MarkCompactCollector::InvalidateCode(Code* code) {
3456 if (heap_->incremental_marking()->IsCompacting() &&
3457 !ShouldSkipEvacuationSlotRecording(code)) {
3458 DCHECK(compacting_);
3460 // If the object is white then no slots were recorded on it yet.
3461 MarkBit mark_bit = Marking::MarkBitFrom(code);
3462 if (Marking::IsWhite(mark_bit)) return;
3464 invalidated_code_.Add(code);
3469 // Return true if the given code is deoptimized or will be deoptimized.
3470 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3471 return code->is_optimized_code() && code->marked_for_deoptimization();
3475 bool MarkCompactCollector::MarkInvalidatedCode() {
3476 bool code_marked = false;
3478 int length = invalidated_code_.length();
3479 for (int i = 0; i < length; i++) {
3480 Code* code = invalidated_code_[i];
3482 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3491 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3492 int length = invalidated_code_.length();
3493 for (int i = 0; i < length; i++) {
3494 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3499 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3500 int length = invalidated_code_.length();
3501 for (int i = 0; i < length; i++) {
3502 Code* code = invalidated_code_[i];
3504 code->Iterate(visitor);
3505 SetMarkBitsUnderInvalidatedCode(code, false);
3508 invalidated_code_.Rewind(0);
3512 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3513 Heap::RelocationLock relocation_lock(heap());
3515 bool code_slots_filtering_required;
3517 GCTracer::Scope gc_scope(heap()->tracer(),
3518 GCTracer::Scope::MC_SWEEP_NEWSPACE);
3519 code_slots_filtering_required = MarkInvalidatedCode();
3524 GCTracer::Scope gc_scope(heap()->tracer(),
3525 GCTracer::Scope::MC_EVACUATE_PAGES);
3529 // Second pass: find pointers to new space and update them.
3530 PointersUpdatingVisitor updating_visitor(heap());
3533 GCTracer::Scope gc_scope(heap()->tracer(),
3534 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3535 // Update pointers in to space.
3536 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3537 heap()->new_space()->top());
3538 for (HeapObject* object = to_it.Next(); object != NULL;
3539 object = to_it.Next()) {
3540 Map* map = object->map();
3541 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3547 GCTracer::Scope gc_scope(heap()->tracer(),
3548 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3550 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3554 GCTracer::Scope gc_scope(heap()->tracer(),
3555 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3556 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3557 &Heap::ScavengeStoreBufferCallback);
3558 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3563 GCTracer::Scope gc_scope(heap()->tracer(),
3564 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3565 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
3566 code_slots_filtering_required);
3567 if (FLAG_trace_fragmentation) {
3568 PrintF(" migration slots buffer: %d\n",
3569 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3572 if (compacting_ && was_marked_incrementally_) {
3573 // It's difficult to filter out slots recorded for large objects.
3574 LargeObjectIterator it(heap_->lo_space());
3575 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3576 // LargeObjectSpace is not swept yet, so we have to skip
3577 // dead objects explicitly.
3578 if (!IsMarked(obj)) continue;
3580 Page* p = Page::FromAddress(obj->address());
3581 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3582 obj->Iterate(&updating_visitor);
3583 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3589 int npages = evacuation_candidates_.length();
3591 GCTracer::Scope gc_scope(
3593 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3594 for (int i = 0; i < npages; i++) {
3595 Page* p = evacuation_candidates_[i];
3596 DCHECK(p->IsEvacuationCandidate() ||
3597 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3599 if (p->IsEvacuationCandidate()) {
3600 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
3601 code_slots_filtering_required);
3602 if (FLAG_trace_fragmentation) {
3603 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3604 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3607 // Important: the skip list should be cleared only after the roots have been
3608 // updated, because root iteration traverses the stack and might have to find
3609 // code objects from a non-updated pc pointing into an evacuation candidate.
3610 SkipList* list = p->skip_list();
3611 if (list != NULL) list->Clear();
3613 if (FLAG_gc_verbose) {
3614 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3615 reinterpret_cast<intptr_t>(p));
3617 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3618 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3620 switch (space->identity()) {
3621 case OLD_DATA_SPACE:
3622 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
3624 case OLD_POINTER_SPACE:
3625 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3626 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
3627 space, NULL, p, &updating_visitor);
3630 if (FLAG_zap_code_space) {
3631 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3632 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
3633 space, NULL, p, &updating_visitor);
3635 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3636 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
3637 space, NULL, p, &updating_visitor);
3648 GCTracer::Scope gc_scope(heap()->tracer(),
3649 GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3651 // Update pointers from cells.
3652 HeapObjectIterator cell_iterator(heap_->cell_space());
3653 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3654 cell = cell_iterator.Next()) {
3655 if (cell->IsCell()) {
3656 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3660 HeapObjectIterator js_global_property_cell_iterator(
3661 heap_->property_cell_space());
3662 for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3663 cell = js_global_property_cell_iterator.Next()) {
3664 if (cell->IsPropertyCell()) {
3665 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3669 heap_->string_table()->Iterate(&updating_visitor);
3670 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3671 if (heap_->weak_object_to_code_table()->IsHashTable()) {
3672 WeakHashTable* table =
3673 WeakHashTable::cast(heap_->weak_object_to_code_table());
3674 table->Iterate(&updating_visitor);
3675 table->Rehash(heap_->isolate()->factory()->undefined_value());
3678 // Update pointers from external string table.
3679 heap_->UpdateReferencesInExternalStringTable(
3680 &UpdateReferenceInExternalStringTableEntry);
3682 EvacuationWeakObjectRetainer evacuation_object_retainer;
3683 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3685 // Visit invalidated code (we ignored all slots on it) and clear mark-bits under it.
3687 ProcessInvalidatedCode(&updating_visitor);
3689 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3691 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3692 DCHECK(migration_slots_buffer_ == NULL);
3696 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3697 int npages = evacuation_candidates_.length();
3698 for (int i = 0; i < npages; i++) {
3699 Page* p = evacuation_candidates_[i];
3700 if (!p->IsEvacuationCandidate()) continue;
3702 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3703 p->InsertAfter(space->LastPage());
3708 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3709 int npages = evacuation_candidates_.length();
3710 for (int i = 0; i < npages; i++) {
3711 Page* p = evacuation_candidates_[i];
3712 if (!p->IsEvacuationCandidate()) continue;
3713 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3714 space->Free(p->area_start(), p->area_size());
3715 p->set_scan_on_scavenge(false);
3716 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3717 p->ResetLiveBytes();
3718 space->ReleasePage(p);
3720 evacuation_candidates_.Rewind(0);
3721 compacting_ = false;
3722 heap()->FreeQueuedChunks();
3726 static const int kStartTableEntriesPerLine = 5;
3727 static const int kStartTableLines = 171;
3728 static const int kStartTableInvalidLine = 127;
3729 static const int kStartTableUnusedEntry = 126;
3731 #define _ kStartTableUnusedEntry
3732 #define X kStartTableInvalidLine
3733 // Mark-bit to object start offset table.
3735 // The line is indexed by the mark bits in a byte. The first number on
3736 // the line describes the number of live object starts for the line and the
3737 // other numbers on the line describe the offsets (in words) of the object starts.
3740 // Since objects are at least 2 words large, we don't have entries for two
3741 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
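// For example, the line for the mark-bit byte 0x09 (bits 0 and 3 set) is
// { 2, 0, 3, _, _ }: two live objects start in the byte's 8-word range, at
// word offsets 0 and 3. A minimal sketch of the lookup, mirroring
// MarkWordToObjectStarts() below:
//   const char* line = kStartTable + 0x09 * kStartTableEntriesPerLine;
//   int count = line[0];   // 2 object starts in these 8 words.
//   int first = line[1];   // Word offset 0.
//   int second = line[2];  // Word offset 3.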
3742 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
4090 // Takes a word of mark bits. Returns the number of objects that start in the
4091 // range and puts their word offsets in the supplied array.
4092 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
4096 // No consecutive 1 bits.
4097 DCHECK((mark_bits & 0x180) != 0x180);
4098 DCHECK((mark_bits & 0x18000) != 0x18000);
4099 DCHECK((mark_bits & 0x1800000) != 0x1800000);
4101 while (mark_bits != 0) {
4102 int byte = (mark_bits & 0xff);
4105 DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
4106 char* table = kStartTable + byte * kStartTableEntriesPerLine;
4107 int objects_in_these_8_words = table[0];
4108 DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
4109 DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
4110 for (int i = 0; i < objects_in_these_8_words; i++) {
4111 starts[objects++] = offset + table[1 + i];
4120 static inline Address DigestFreeStart(Address approximate_free_start,
4121 uint32_t free_start_cell) {
4122 DCHECK(free_start_cell != 0);
4124 // No consecutive 1 bits.
4125 DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
4128 uint32_t cell = free_start_cell;
4129 int offset_of_last_live;
4130 if ((cell & 0x80000000u) != 0) {
4131 // This case would overflow below.
4132 offset_of_last_live = 31;
4134 // Remove all but one bit, the most significant. This is an optimization
4135 // that may or may not be worthwhile.
4141 cell = (cell + 1) >> 1;
4142 int live_objects = MarkWordToObjectStarts(cell, offsets);
4143 DCHECK(live_objects == 1);
4144 offset_of_last_live = offsets[live_objects - 1];
4146 Address last_live_start =
4147 approximate_free_start + offset_of_last_live * kPointerSize;
4148 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
4149 Address free_start = last_live_start + last_live->Size();
4154 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
4157 // No consecutive 1 bits.
4158 DCHECK((cell & (cell << 1)) == 0);
4161 if (cell == 0x80000000u) { // Avoid overflow below.
4162 return block_address + 31 * kPointerSize;
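// The expression below isolates the lowest set bit of |cell|:
// (cell ^ (cell - 1)) is a mask of that bit and every bit below it, so adding
// one and shifting right leaves exactly the lowest set bit.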
4164 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
4165 DCHECK((first_set_bit & cell) == first_set_bit);
4166 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
4167 DCHECK(live_objects == 1);
4169 return block_address + offsets[0] * kPointerSize;
4173 // Force instantiation of templatized SweepConservatively method for
4174 // SWEEP_ON_MAIN_THREAD mode.
4175 template int MarkCompactCollector::SweepConservatively<
4176 MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
4179 // Force instantiation of templatized SweepConservatively method for
4180 // SWEEP_IN_PARALLEL mode.
4181 template int MarkCompactCollector::SweepConservatively<
4182 MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
4185 // Sweeps a space conservatively. After this has been done the larger free
4186 // spaces have been put on the free list and the smaller ones have been
4187 // ignored and left untouched. A free space is always either ignored or put
4188 // on the free list, never split up into two parts. This is important
4189 // because it means that any FreeSpace maps left actually describe a region of
4190 // memory that can be ignored when scanning. Dead objects other than free
4191 // spaces will not contain the free space map.
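// A minimal sketch of how the two instantiations are used elsewhere in this
// file: the main thread passes no free list (see the DCHECK below),
//   SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
// while a sweeper thread fills a private free list and concatenates it into
// the space's shared list afterwards,
//   FreeList private_free_list(space);
//   SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, page);
//   free_list->Concatenate(&private_free_list);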
4192 template <MarkCompactCollector::SweepingParallelism mode>
4193 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
4194 FreeList* free_list, Page* p) {
4195 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
4197 (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
4198 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
4199 free_list == NULL));
4201 intptr_t freed_bytes = 0;
4202 intptr_t max_freed_bytes = 0;
4205 // Skip over all the dead objects at the start of the page and mark them free.
4206 Address cell_base = 0;
4207 MarkBit::CellType* cell = NULL;
4208 MarkBitCellIterator it(p);
4209 for (; !it.Done(); it.Advance()) {
4210 cell_base = it.CurrentCellBase();
4211 cell = it.CurrentCell();
4212 if (*cell != 0) break;
4216 size = p->area_end() - p->area_start();
4218 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
4219 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4220 DCHECK_EQ(0, p->LiveBytes());
4221 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
4222 // When concurrent sweeping is active, the page will be marked after
4223 // sweeping by the main thread.
4224 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
4226 p->MarkSweptConservatively();
4228 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
4231 // Grow the size of the start-of-page free space a little to get up to the
4232 // first live object.
4233 Address free_end = StartOfLiveObject(cell_base, *cell);
4234 // Free the first free space.
4235 size = free_end - p->area_start();
4237 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
4238 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4240 // The start of the current free area is represented in undigested form by
4241 // the address of the last 32-word section that contained a live object and
4242 // the marking bitmap for that cell, which describes where the live object
4243 // started. Unless we find a large free space in the bitmap we will not
4244 // digest this pair into a real address. We start the iteration here at the
4245 // first word in the marking bit map that indicates a live object.
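// For example, an interior dead run is only digested and handed to Free() once
// it spans more than 32 words (32 * kPointerSize, i.e. 256 bytes assuming
// 8-byte pointers); smaller runs stay in this undigested (cell base, mark bits)
// form and are left untouched.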
4246 Address free_start = cell_base;
4247 MarkBit::CellType free_start_cell = *cell;
4249 for (; !it.Done(); it.Advance()) {
4250 cell_base = it.CurrentCellBase();
4251 cell = it.CurrentCell();
4253 // We have a live object. Check approximately whether the gap since the last
4254 // live object is more than 32 words.
4255 if (cell_base - free_start > 32 * kPointerSize) {
4256 free_start = DigestFreeStart(free_start, free_start_cell);
4257 if (cell_base - free_start > 32 * kPointerSize) {
4258 // Now that we know the exact start of the free space, it still looks
4259 // like we have a large enough free space to be worth bothering with,
4260 // so now we need to find the start of the first live object at the
4261 // end of the free space.
4262 free_end = StartOfLiveObject(cell_base, *cell);
4263 freed_bytes = Free<mode>(space, free_list, free_start,
4264 static_cast<int>(free_end - free_start));
4265 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4268 // Update our undigested record of where the current free area started.
4269 free_start = cell_base;
4270 free_start_cell = *cell;
4271 // Clear the marking bits for the current cell.
4276 // Handle the free space at the end of the page.
4277 if (cell_base - free_start > 32 * kPointerSize) {
4278 free_start = DigestFreeStart(free_start, free_start_cell);
4279 freed_bytes = Free<mode>(space, free_list, free_start,
4280 static_cast<int>(p->area_end() - free_start));
4281 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4284 p->ResetLiveBytes();
4285 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
4286 // When concurrent sweeping is active, the page will be marked after
4287 // sweeping by the main thread.
4288 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
4290 p->MarkSweptConservatively();
4292 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
4296 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4297 int required_freed_bytes) {
4299 int max_freed_overall = 0;
4300 PageIterator it(space);
4301 while (it.has_next()) {
4302 Page* p = it.next();
4303 max_freed = SweepInParallel(p, space);
4304 DCHECK(max_freed >= 0);
4305 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4308 max_freed_overall = Max(max_freed, max_freed_overall);
4309 if (p == space->end_of_unswept_pages()) break;
4311 return max_freed_overall;
4315 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
4317 if (page->TryParallelSweeping()) {
4318 FreeList* free_list = space == heap()->old_pointer_space()
4319 ? free_list_old_pointer_space_.get()
4320 : free_list_old_data_space_.get();
4321 FreeList private_free_list(space);
4322 if (space->swept_precisely()) {
4323 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
4324 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4325 space, &private_free_list, page, NULL);
4327 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
4328 space, &private_free_list, page);
4330 free_list->Concatenate(&private_free_list);
4336 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4337 space->set_swept_precisely(sweeper == PRECISE ||
4338 sweeper == CONCURRENT_PRECISE ||
4339 sweeper == PARALLEL_PRECISE);
4340 space->ClearStats();
4342 // We defensively initialize end_of_unswept_pages_ here with the first page
4343 // of the pages list.
4344 space->set_end_of_unswept_pages(space->FirstPage());
4346 PageIterator it(space);
4348 int pages_swept = 0;
4349 bool unused_page_present = false;
4350 bool parallel_sweeping_active = false;
4352 while (it.has_next()) {
4353 Page* p = it.next();
4354 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4356 // Clear sweeping flags indicating that marking bits are still intact.
4357 p->ClearSweptPrecisely();
4358 p->ClearSweptConservatively();
4360 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4361 p->IsEvacuationCandidate()) {
4362 // Will be processed in EvacuateNewSpaceAndCandidates.
4363 DCHECK(evacuation_candidates_.length() > 0);
4367 // One unused page is kept; all further unused pages are released rather than swept.
4368 if (p->LiveBytes() == 0) {
4369 if (unused_page_present) {
4370 if (FLAG_gc_verbose) {
4371 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4372 reinterpret_cast<intptr_t>(p));
4374 // Adjust unswept free bytes because releasing a page expects said
4375 // counter to be accurate for unswept pages.
4376 space->IncreaseUnsweptFreeBytes(p);
4377 space->ReleasePage(p);
4380 unused_page_present = true;
4384 case CONCURRENT_CONSERVATIVE:
4385 case PARALLEL_CONSERVATIVE: {
4386 if (!parallel_sweeping_active) {
4387 if (FLAG_gc_verbose) {
4388 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4389 reinterpret_cast<intptr_t>(p));
4391 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4393 parallel_sweeping_active = true;
4395 if (FLAG_gc_verbose) {
4396 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4397 reinterpret_cast<intptr_t>(p));
4399 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4400 space->IncreaseUnsweptFreeBytes(p);
4402 space->set_end_of_unswept_pages(p);
4405 case CONCURRENT_PRECISE:
4406 case PARALLEL_PRECISE:
4407 if (!parallel_sweeping_active) {
4408 if (FLAG_gc_verbose) {
4409 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4410 reinterpret_cast<intptr_t>(p));
4412 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4413 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4415 parallel_sweeping_active = true;
4417 if (FLAG_gc_verbose) {
4418 PrintF("Sweeping 0x%" V8PRIxPTR " precisely in parallel.\n",
4419 reinterpret_cast<intptr_t>(p));
4421 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4422 space->IncreaseUnsweptFreeBytes(p);
4424 space->set_end_of_unswept_pages(p);
4427 if (FLAG_gc_verbose) {
4428 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4429 reinterpret_cast<intptr_t>(p));
4431 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4432 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4433 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4434 } else if (space->identity() == CODE_SPACE) {
4435 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4436 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4438 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4439 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4444 default: { UNREACHABLE(); }
4448 if (FLAG_gc_verbose) {
4449 PrintF("SweepSpace: %s (%d pages swept)\n",
4450 AllocationSpaceName(space->identity()), pages_swept);
4453 // Give pages that are queued to be freed back to the OS.
4454 heap()->FreeQueuedChunks();
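// The two predicates below split the sweeper types: sweeper threads are
// started for all four PARALLEL_*/CONCURRENT_* modes, but SweepSpaces() only
// waits for them (EnsureSweepingCompleted()) in the PARALLEL_* modes; in the
// CONCURRENT_* modes sweeping keeps running in the background after
// SweepSpaces() returns.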
4458 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4459 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4460 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
4461 type == MarkCompactCollector::PARALLEL_PRECISE ||
4462 type == MarkCompactCollector::CONCURRENT_PRECISE;
4466 static bool ShouldWaitForSweeperThreads(
4467 MarkCompactCollector::SweeperType type) {
4468 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4469 type == MarkCompactCollector::PARALLEL_PRECISE;
4473 void MarkCompactCollector::SweepSpaces() {
4474 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4475 double start_time = 0.0;
4476 if (FLAG_print_cumulative_gc_stat) {
4477 start_time = base::OS::TimeCurrentMillis();
4481 state_ = SWEEP_SPACES;
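// Sweeper selection: the default is CONCURRENT_CONSERVATIVE;
// FLAG_parallel_sweeping and FLAG_concurrent_sweeping choose between the
// parallel and concurrent conservative variants (concurrent wins if both are
// set), FLAG_always_precise_sweeping upgrades either to its precise
// counterpart, and sweep_precisely_ forces fully sequential PRECISE sweeping,
// overriding everything else.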
4483 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
4484 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4485 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4486 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
4487 how_to_sweep = PARALLEL_PRECISE;
4489 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
4490 how_to_sweep = CONCURRENT_PRECISE;
4492 if (sweep_precisely_) how_to_sweep = PRECISE;
4494 MoveEvacuationCandidatesToEndOfPagesList();
4496 // Noncompacting collections simply sweep the spaces to clear the mark
4497 // bits and free the nonlive blocks (for old and map spaces). We sweep
4498 // the map space last because freeing non-live maps overwrites them and
4499 // the other spaces rely on possibly non-live maps to get the sizes for
4500 // non-live objects.
4502 GCTracer::Scope sweep_scope(heap()->tracer(),
4503 GCTracer::Scope::MC_SWEEP_OLDSPACE);
4505 SequentialSweepingScope scope(this);
4506 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4507 SweepSpace(heap()->old_data_space(), how_to_sweep);
4510 if (ShouldStartSweeperThreads(how_to_sweep)) {
4511 StartSweeperThreads();
4514 if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4515 EnsureSweepingCompleted();
4518 RemoveDeadInvalidatedCode();
4521 GCTracer::Scope sweep_scope(heap()->tracer(),
4522 GCTracer::Scope::MC_SWEEP_CODE);
4523 SweepSpace(heap()->code_space(), PRECISE);
4527 GCTracer::Scope sweep_scope(heap()->tracer(),
4528 GCTracer::Scope::MC_SWEEP_CELL);
4529 SweepSpace(heap()->cell_space(), PRECISE);
4530 SweepSpace(heap()->property_cell_space(), PRECISE);
4533 EvacuateNewSpaceAndCandidates();
4535 // ClearNonLiveTransitions depends on precise sweeping of the map space to
4536 // detect whether an unmarked map became dead in this collection or in one
4537 // of the previous ones.
4539 GCTracer::Scope sweep_scope(heap()->tracer(),
4540 GCTracer::Scope::MC_SWEEP_MAP);
4541 SweepSpace(heap()->map_space(), PRECISE);
4544 // Deallocate unmarked objects and clear marked bits for marked objects.
4545 heap_->lo_space()->FreeUnmarkedObjects();
4547 // Deallocate evacuated candidate pages.
4548 ReleaseEvacuationCandidates();
4550 if (FLAG_print_cumulative_gc_stat) {
4551 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
4557 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4558 PageIterator it(space);
4559 while (it.has_next()) {
4560 Page* p = it.next();
4561 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4562 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4563 if (space->swept_precisely()) {
4564 p->MarkSweptPrecisely();
4566 p->MarkSweptConservatively();
4569 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4574 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4575 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4576 ParallelSweepSpaceComplete(heap()->old_data_space());
4580 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4581 if (isolate()->debug()->is_loaded() ||
4582 isolate()->debug()->has_break_points()) {
4587 if (code_flusher_ != NULL) return;
4588 code_flusher_ = new CodeFlusher(isolate());
4590 if (code_flusher_ == NULL) return;
4591 code_flusher_->EvictAllCandidates();
4592 delete code_flusher_;
4593 code_flusher_ = NULL;
4596 if (FLAG_trace_code_flushing) {
4597 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4602 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4603 // Our profiling tools do not expect intersections between
4604 // code objects. We should either reenable it or change our tools.
4605 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4607 if (obj->IsCode()) {
4608 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4613 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4616 void MarkCompactCollector::Initialize() {
4617 MarkCompactMarkingVisitor::Initialize();
4618 IncrementalMarking::Initialize();
4622 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4623 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
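// A typed slot occupies two consecutive entries in the buffer: first the
// SlotType cast to an ObjectSlot (its numeric value is below
// NUMBER_OF_SLOT_TYPES, so it cannot be mistaken for a real slot address),
// then the address the type applies to. UpdateSlots() and
// UpdateSlotsWithFilter() rely on this pairing when decoding.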
4627 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4628 SlotsBuffer** buffer_address, SlotType type,
4629 Address addr, AdditionMode mode) {
4630 SlotsBuffer* buffer = *buffer_address;
4631 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4632 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4633 allocator->DeallocateChain(buffer_address);
4636 buffer = allocator->AllocateBuffer(buffer);
4637 *buffer_address = buffer;
4639 DCHECK(buffer->HasSpaceForTypedSlot());
4640 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4641 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4646 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4647 if (RelocInfo::IsCodeTarget(rmode)) {
4648 return SlotsBuffer::CODE_TARGET_SLOT;
4649 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4650 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4651 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4652 return SlotsBuffer::DEBUG_TARGET_SLOT;
4653 } else if (RelocInfo::IsJSReturn(rmode)) {
4654 return SlotsBuffer::JS_RETURN_SLOT;
4657 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4661 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4662 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4663 RelocInfo::Mode rmode = rinfo->rmode();
4664 if (target_page->IsEvacuationCandidate() &&
4665 (rinfo->host() == NULL ||
4666 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4668 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4669 // This doesn't need to be typed since it is just a normal heap pointer.
4670 Object** target_pointer =
4671 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4672 success = SlotsBuffer::AddTo(
4673 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4674 target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
4675 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4676 success = SlotsBuffer::AddTo(
4677 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4678 SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
4679 SlotsBuffer::FAIL_ON_OVERFLOW);
4681 success = SlotsBuffer::AddTo(
4682 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4683 SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
4686 EvictEvacuationCandidate(target_page);
4692 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4693 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4694 if (target_page->IsEvacuationCandidate() &&
4695 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4696 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4697 target_page->slots_buffer_address(),
4698 SlotsBuffer::CODE_ENTRY_SLOT, slot,
4699 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4700 EvictEvacuationCandidate(target_page);
4706 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4707 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4708 if (is_compacting()) {
4710 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
4712 MarkBit mark_bit = Marking::MarkBitFrom(host);
4713 if (Marking::IsBlack(mark_bit)) {
4714 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4715 RecordRelocSlot(&rinfo, target);
4721 static inline SlotsBuffer::SlotType DecodeSlotType(
4722 SlotsBuffer::ObjectSlot slot) {
4723 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4727 void SlotsBuffer::UpdateSlots(Heap* heap) {
4728 PointersUpdatingVisitor v(heap);
4730 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4731 ObjectSlot slot = slots_[slot_idx];
4732 if (!IsTypedSlot(slot)) {
4733 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4736 DCHECK(slot_idx < idx_);
4737 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4738 reinterpret_cast<Address>(slots_[slot_idx]));
4744 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4745 PointersUpdatingVisitor v(heap);
4747 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4748 ObjectSlot slot = slots_[slot_idx];
4749 if (!IsTypedSlot(slot)) {
4750 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4751 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4755 DCHECK(slot_idx < idx_);
4756 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4757 if (!IsOnInvalidatedCodeObject(pc)) {
4758 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4759 reinterpret_cast<Address>(slots_[slot_idx]));
4766 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4767 return new SlotsBuffer(next_buffer);
4771 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4776 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4777 SlotsBuffer* buffer = *buffer_address;
4778 while (buffer != NULL) {
4779 SlotsBuffer* next_buffer = buffer->next();
4780 DeallocateBuffer(buffer);
4781 buffer = next_buffer;
4783 *buffer_address = NULL;
4786 } // namespace v8::internal