1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h"
11 #include "src/deoptimizer.h"
12 #include "src/execution.h"
13 #include "src/gdb-jit.h"
14 #include "src/global-handles.h"
15 #include "src/heap/incremental-marking.h"
16 #include "src/heap/mark-compact.h"
17 #include "src/heap/objects-visiting.h"
18 #include "src/heap/objects-visiting-inl.h"
19 #include "src/heap/spaces-inl.h"
20 #include "src/heap/sweeper-thread.h"
21 #include "src/heap-profiler.h"
22 #include "src/ic-inl.h"
23 #include "src/stub-cache.h"
29 const char* Marking::kWhiteBitPattern = "00";
30 const char* Marking::kBlackBitPattern = "10";
31 const char* Marking::kGreyBitPattern = "11";
32 const char* Marking::kImpossibleBitPattern = "01";
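
// The four bit patterns above document the two-bit color encoding used by the
// marking bitmap: every object owns a pair of adjacent mark bits encoding
// white (unreached), grey (reached, children not yet visited) or black
// (reached, children visited). The following is a minimal, self-contained
// sketch of that encoding for illustration only; it is not part of V8, and the
// names in the marking_color_sketch namespace are invented.
namespace marking_color_sketch {

enum SketchColor { SKETCH_WHITE, SKETCH_GREY, SKETCH_BLACK, SKETCH_IMPOSSIBLE };

// Decodes a color from an object's two mark bits, matching the patterns
// above: "00" white, "10" black, "11" grey, "01" impossible.
inline SketchColor DecodeSketchColor(bool mark_bit, bool second_bit) {
  if (mark_bit) return second_bit ? SKETCH_GREY : SKETCH_BLACK;
  return second_bit ? SKETCH_IMPOSSIBLE : SKETCH_WHITE;
}

}  // namespace marking_color_sketch
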
35 // -------------------------------------------------------------------------
36 // MarkCompactCollector
38 MarkCompactCollector::MarkCompactCollector(Heap* heap)
43 sweep_precisely_(false),
44 reduce_memory_footprint_(false),
45 abort_incremental_marking_(false),
46 marking_parity_(ODD_MARKING_PARITY),
48 was_marked_incrementally_(false),
49 sweeping_in_progress_(false),
50 pending_sweeper_jobs_semaphore_(0),
51 sequential_sweeping_(false),
52 migration_slots_buffer_(NULL),
55 have_code_to_deoptimize_(false) {
59 class VerifyMarkingVisitor : public ObjectVisitor {
61 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
63 void VisitPointers(Object** start, Object** end) {
64 for (Object** current = start; current < end; current++) {
65 if ((*current)->IsHeapObject()) {
66 HeapObject* object = HeapObject::cast(*current);
67 CHECK(heap_->mark_compact_collector()->IsMarked(object));
72 void VisitEmbeddedPointer(RelocInfo* rinfo) {
73 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
74 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
75 Object* p = rinfo->target_object();
80 void VisitCell(RelocInfo* rinfo) {
81 Code* code = rinfo->host();
82 DCHECK(rinfo->rmode() == RelocInfo::CELL);
83 if (!code->IsWeakObject(rinfo->target_cell())) {
84 ObjectVisitor::VisitCell(rinfo);
93 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
94 VerifyMarkingVisitor visitor(heap);
96 Address next_object_must_be_here_or_later = bottom;
98 for (Address current = bottom; current < top; current += kPointerSize) {
99 object = HeapObject::FromAddress(current);
100 if (MarkCompactCollector::IsMarked(object)) {
101 CHECK(current >= next_object_must_be_here_or_later);
102 object->Iterate(&visitor);
103 next_object_must_be_here_or_later = current + object->Size();
109 static void VerifyMarking(NewSpace* space) {
110 Address end = space->top();
111 NewSpacePageIterator it(space->bottom(), end);
112   // The bottom position is at the start of its page, which allows us to use
113   // page->area_start() as the start of the range on all pages.
114 CHECK_EQ(space->bottom(),
115 NewSpacePage::FromAddress(space->bottom())->area_start());
116 while (it.has_next()) {
117 NewSpacePage* page = it.next();
118 Address limit = it.has_next() ? page->area_end() : end;
119 CHECK(limit == end || !page->Contains(end));
120 VerifyMarking(space->heap(), page->area_start(), limit);
125 static void VerifyMarking(PagedSpace* space) {
126 PageIterator it(space);
128 while (it.has_next()) {
130 VerifyMarking(space->heap(), p->area_start(), p->area_end());
135 static void VerifyMarking(Heap* heap) {
136 VerifyMarking(heap->old_pointer_space());
137 VerifyMarking(heap->old_data_space());
138 VerifyMarking(heap->code_space());
139 VerifyMarking(heap->cell_space());
140 VerifyMarking(heap->property_cell_space());
141 VerifyMarking(heap->map_space());
142 VerifyMarking(heap->new_space());
144 VerifyMarkingVisitor visitor(heap);
146 LargeObjectIterator it(heap->lo_space());
147 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
148 if (MarkCompactCollector::IsMarked(obj)) {
149 obj->Iterate(&visitor);
153 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
157 class VerifyEvacuationVisitor : public ObjectVisitor {
159 void VisitPointers(Object** start, Object** end) {
160 for (Object** current = start; current < end; current++) {
161 if ((*current)->IsHeapObject()) {
162 HeapObject* object = HeapObject::cast(*current);
163 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
170 static void VerifyEvacuation(Page* page) {
171 VerifyEvacuationVisitor visitor;
172 HeapObjectIterator iterator(page, NULL);
173 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
174 heap_object = iterator.Next()) {
175 // We skip free space objects.
176 if (!heap_object->IsFiller()) {
177 heap_object->Iterate(&visitor);
183 static void VerifyEvacuation(NewSpace* space) {
184 NewSpacePageIterator it(space->bottom(), space->top());
185 VerifyEvacuationVisitor visitor;
187 while (it.has_next()) {
188 NewSpacePage* page = it.next();
189 Address current = page->area_start();
190 Address limit = it.has_next() ? page->area_end() : space->top();
191 CHECK(limit == space->top() || !page->Contains(space->top()));
192 while (current < limit) {
193 HeapObject* object = HeapObject::FromAddress(current);
194 object->Iterate(&visitor);
195 current += object->Size();
201 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
202 if (!space->swept_precisely()) return;
203 if (FLAG_use_allocation_folding &&
204 (space == heap->old_pointer_space() || space == heap->old_data_space())) {
207 PageIterator it(space);
209 while (it.has_next()) {
211 if (p->IsEvacuationCandidate()) continue;
217 static void VerifyEvacuation(Heap* heap) {
218 VerifyEvacuation(heap, heap->old_pointer_space());
219 VerifyEvacuation(heap, heap->old_data_space());
220 VerifyEvacuation(heap, heap->code_space());
221 VerifyEvacuation(heap, heap->cell_space());
222 VerifyEvacuation(heap, heap->property_cell_space());
223 VerifyEvacuation(heap, heap->map_space());
224 VerifyEvacuation(heap->new_space());
226 VerifyEvacuationVisitor visitor;
227 heap->IterateStrongRoots(&visitor, VISIT_ALL);
229 #endif // VERIFY_HEAP
233 class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
235 VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
237 void VisitPointers(Object** start, Object** end) {
238 for (Object** current = start; current < end; current++) {
239 if ((*current)->IsHeapObject()) {
240 HeapObject* object = HeapObject::cast(*current);
241 if (object->IsString()) continue;
242 switch (object->map()->instance_type()) {
243 case JS_FUNCTION_TYPE:
244 CheckContext(JSFunction::cast(object)->context());
246 case JS_GLOBAL_PROXY_TYPE:
247 CheckContext(JSGlobalProxy::cast(object)->native_context());
249 case JS_GLOBAL_OBJECT_TYPE:
250 case JS_BUILTINS_OBJECT_TYPE:
251 CheckContext(GlobalObject::cast(object)->native_context());
260 VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
263 VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
264 VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
266 case FIXED_ARRAY_TYPE:
267 if (object->IsContext()) {
268 CheckContext(object);
270 FixedArray* array = FixedArray::cast(object);
271 int length = array->length();
272           // Set the array length to zero to prevent cycles while iterating
273           // over array bodies; this is easier than intrusive marking.
274 array->set_length(0);
275 array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
277 array->set_length(length);
283 case TYPE_FEEDBACK_INFO_TYPE:
284 object->Iterate(this);
286 case DECLARED_ACCESSOR_INFO_TYPE:
287 case EXECUTABLE_ACCESSOR_INFO_TYPE:
288 case BYTE_ARRAY_TYPE:
289 case CALL_HANDLER_INFO_TYPE:
291 case FIXED_DOUBLE_ARRAY_TYPE:
292 case HEAP_NUMBER_TYPE:
293 case MUTABLE_HEAP_NUMBER_TYPE:
294 case INTERCEPTOR_INFO_TYPE:
297 case SHARED_FUNCTION_INFO_TYPE:
307 void CheckContext(Object* context) {
308 if (!context->IsContext()) return;
309 Context* native_context = Context::cast(context)->native_context();
310 if (current_native_context_ == NULL) {
311 current_native_context_ = native_context;
313 CHECK_EQ(current_native_context_, native_context);
317 Context* current_native_context_;
321 static void VerifyNativeContextSeparation(Heap* heap) {
322 HeapObjectIterator it(heap->code_space());
324 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
325 VerifyNativeContextSeparationVisitor visitor;
326 Code::cast(object)->CodeIterateBody(&visitor);
332 void MarkCompactCollector::SetUp() {
333 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
334 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
338 void MarkCompactCollector::TearDown() { AbortCompaction(); }
341 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
342 p->MarkEvacuationCandidate();
343 evacuation_candidates_.Add(p);
347 static void TraceFragmentation(PagedSpace* space) {
348 int number_of_pages = space->CountTotalPages();
349 intptr_t reserved = (number_of_pages * space->AreaSize());
350 intptr_t free = reserved - space->SizeOfObjects();
351 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
352 AllocationSpaceName(space->identity()), number_of_pages,
353 static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
357 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
359 DCHECK(evacuation_candidates_.length() == 0);
361 #ifdef ENABLE_GDB_JIT_INTERFACE
362   // If the GDBJIT interface is active, disable compaction.
363 if (FLAG_gdbjit) return false;
366 CollectEvacuationCandidates(heap()->old_pointer_space());
367 CollectEvacuationCandidates(heap()->old_data_space());
369 if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
370 FLAG_incremental_code_compaction)) {
371 CollectEvacuationCandidates(heap()->code_space());
372 } else if (FLAG_trace_fragmentation) {
373 TraceFragmentation(heap()->code_space());
376 if (FLAG_trace_fragmentation) {
377 TraceFragmentation(heap()->map_space());
378 TraceFragmentation(heap()->cell_space());
379 TraceFragmentation(heap()->property_cell_space());
382 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
383 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
384 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
386 compacting_ = evacuation_candidates_.length() > 0;
393 void MarkCompactCollector::CollectGarbage() {
394 // Make sure that Prepare() has been called. The individual steps below will
395 // update the state as they proceed.
396 DCHECK(state_ == PREPARE_GC);
399 DCHECK(heap_->incremental_marking()->IsStopped());
401 if (FLAG_collect_maps) ClearNonLiveReferences();
403 ClearWeakCollections();
406 if (FLAG_verify_heap) {
407 VerifyMarking(heap_);
414 if (FLAG_verify_native_context_separation) {
415 VerifyNativeContextSeparation(heap_);
420 if (heap()->weak_embedded_objects_verification_enabled()) {
421 VerifyWeakEmbeddedObjectsInCode();
423 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
424 VerifyOmittedMapChecks();
430 if (marking_parity_ == EVEN_MARKING_PARITY) {
431 marking_parity_ = ODD_MARKING_PARITY;
433 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
434 marking_parity_ = EVEN_MARKING_PARITY;
440 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
441 PageIterator it(space);
443 while (it.has_next()) {
445 CHECK(p->markbits()->IsClean());
446 CHECK_EQ(0, p->LiveBytes());
451 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
452 NewSpacePageIterator it(space->bottom(), space->top());
454 while (it.has_next()) {
455 NewSpacePage* p = it.next();
456 CHECK(p->markbits()->IsClean());
457 CHECK_EQ(0, p->LiveBytes());
462 void MarkCompactCollector::VerifyMarkbitsAreClean() {
463 VerifyMarkbitsAreClean(heap_->old_pointer_space());
464 VerifyMarkbitsAreClean(heap_->old_data_space());
465 VerifyMarkbitsAreClean(heap_->code_space());
466 VerifyMarkbitsAreClean(heap_->cell_space());
467 VerifyMarkbitsAreClean(heap_->property_cell_space());
468 VerifyMarkbitsAreClean(heap_->map_space());
469 VerifyMarkbitsAreClean(heap_->new_space());
471 LargeObjectIterator it(heap_->lo_space());
472 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
473 MarkBit mark_bit = Marking::MarkBitFrom(obj);
474 CHECK(Marking::IsWhite(mark_bit));
475 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
480 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
481 HeapObjectIterator code_iterator(heap()->code_space());
482 for (HeapObject* obj = code_iterator.Next(); obj != NULL;
483 obj = code_iterator.Next()) {
484 Code* code = Code::cast(obj);
485 if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
486 if (WillBeDeoptimized(code)) continue;
487 code->VerifyEmbeddedObjectsDependency();
492 void MarkCompactCollector::VerifyOmittedMapChecks() {
493 HeapObjectIterator iterator(heap()->map_space());
494 for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
495 Map* map = Map::cast(obj);
496 map->VerifyOmittedMapChecks();
499 #endif // VERIFY_HEAP
502 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
503 PageIterator it(space);
505 while (it.has_next()) {
506 Bitmap::Clear(it.next());
511 static void ClearMarkbitsInNewSpace(NewSpace* space) {
512 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
514 while (it.has_next()) {
515 Bitmap::Clear(it.next());
520 void MarkCompactCollector::ClearMarkbits() {
521 ClearMarkbitsInPagedSpace(heap_->code_space());
522 ClearMarkbitsInPagedSpace(heap_->map_space());
523 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
524 ClearMarkbitsInPagedSpace(heap_->old_data_space());
525 ClearMarkbitsInPagedSpace(heap_->cell_space());
526 ClearMarkbitsInPagedSpace(heap_->property_cell_space());
527 ClearMarkbitsInNewSpace(heap_->new_space());
529 LargeObjectIterator it(heap_->lo_space());
530 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
531 MarkBit mark_bit = Marking::MarkBitFrom(obj);
533 mark_bit.Next().Clear();
534 Page::FromAddress(obj->address())->ResetProgressBar();
535 Page::FromAddress(obj->address())->ResetLiveBytes();
540 class MarkCompactCollector::SweeperTask : public v8::Task {
542 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
544 virtual ~SweeperTask() {}
547 // v8::Task overrides.
548 virtual void Run() V8_OVERRIDE {
549 heap_->mark_compact_collector()->SweepInParallel(space_, 0);
550 heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
556 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
560 void MarkCompactCollector::StartSweeperThreads() {
561 DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
562 DCHECK(free_list_old_data_space_.get()->IsEmpty());
563 sweeping_in_progress_ = true;
564 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
565 isolate()->sweeper_threads()[i]->StartSweeping();
567 if (FLAG_job_based_sweeping) {
568 V8::GetCurrentPlatform()->CallOnBackgroundThread(
569 new SweeperTask(heap(), heap()->old_data_space()),
570 v8::Platform::kShortRunningTask);
571 V8::GetCurrentPlatform()->CallOnBackgroundThread(
572 new SweeperTask(heap(), heap()->old_pointer_space()),
573 v8::Platform::kShortRunningTask);
578 void MarkCompactCollector::EnsureSweepingCompleted() {
579 DCHECK(sweeping_in_progress_ == true);
581   // If sweeping is not completed, we try to complete it here. If we do not
582   // have sweeper threads, we have to complete sweeping ourselves, since we
583   // otherwise have no reliable indicator that a space has been swept.
584 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
585 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
586 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
589 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
590 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
592 if (FLAG_job_based_sweeping) {
593 // Wait twice for both jobs.
594 pending_sweeper_jobs_semaphore_.Wait();
595 pending_sweeper_jobs_semaphore_.Wait();
597 ParallelSweepSpacesComplete();
598 sweeping_in_progress_ = false;
599 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
600 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
601 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
602 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
605 if (FLAG_verify_heap) {
606 VerifyEvacuation(heap_);
612 bool MarkCompactCollector::IsSweepingCompleted() {
613 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
614 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
619 if (FLAG_job_based_sweeping) {
620 if (!pending_sweeper_jobs_semaphore_.WaitFor(
621 base::TimeDelta::FromSeconds(0))) {
624 pending_sweeper_jobs_semaphore_.Signal();
631 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
634 if (space == heap()->old_pointer_space()) {
635 free_list = free_list_old_pointer_space_.get();
636 } else if (space == heap()->old_data_space()) {
637 free_list = free_list_old_data_space_.get();
639     // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
640     // that we only refill free lists for the old data and old pointer spaces.
644 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
645 space->AddToAccountingStats(freed_bytes);
646 space->DecrementUnsweptFreeBytes(freed_bytes);
650 bool MarkCompactCollector::AreSweeperThreadsActivated() {
651 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
655 void Marking::TransferMark(Address old_start, Address new_start) {
656 // This is only used when resizing an object.
657 DCHECK(MemoryChunk::FromAddress(old_start) ==
658 MemoryChunk::FromAddress(new_start));
660 if (!heap_->incremental_marking()->IsMarking()) return;
662 // If the mark doesn't move, we don't check the color of the object.
663 // It doesn't matter whether the object is black, since it hasn't changed
664 // size, so the adjustment to the live data count will be zero anyway.
665 if (old_start == new_start) return;
667 MarkBit new_mark_bit = MarkBitFrom(new_start);
668 MarkBit old_mark_bit = MarkBitFrom(old_start);
671 ObjectColor old_color = Color(old_mark_bit);
674 if (Marking::IsBlack(old_mark_bit)) {
675 old_mark_bit.Clear();
676 DCHECK(IsWhite(old_mark_bit));
677 Marking::MarkBlack(new_mark_bit);
679 } else if (Marking::IsGrey(old_mark_bit)) {
680 old_mark_bit.Clear();
681 old_mark_bit.Next().Clear();
682 DCHECK(IsWhite(old_mark_bit));
683 heap_->incremental_marking()->WhiteToGreyAndPush(
684 HeapObject::FromAddress(new_start), new_mark_bit);
685 heap_->incremental_marking()->RestartIfNotMarking();
689 ObjectColor new_color = Color(new_mark_bit);
690 DCHECK(new_color == old_color);
695 const char* AllocationSpaceName(AllocationSpace space) {
699 case OLD_POINTER_SPACE:
700 return "OLD_POINTER_SPACE";
702 return "OLD_DATA_SPACE";
709 case PROPERTY_CELL_SPACE:
710 return "PROPERTY_CELL_SPACE";
721 // Returns zero for pages that have so little fragmentation that it is not
722 // worth defragmenting them. Otherwise returns a positive integer that
723 // estimates fragmentation on an arbitrary scale.
724 static int FreeListFragmentation(PagedSpace* space, Page* p) {
725 // If page was not swept then there are no free list items on it.
726 if (!p->WasSwept()) {
727 if (FLAG_trace_fragmentation) {
728 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
729 AllocationSpaceName(space->identity()), p->LiveBytes());
734 PagedSpace::SizeStats sizes;
735 space->ObtainFreeListStatistics(p, &sizes);
738 intptr_t ratio_threshold;
739 intptr_t area_size = space->AreaSize();
740 if (space->identity() == CODE_SPACE) {
741 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
742 ratio_threshold = 10;
744 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
745 ratio_threshold = 15;
748 if (FLAG_trace_fragmentation) {
749 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
750 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
751 static_cast<int>(sizes.small_size_),
752 static_cast<double>(sizes.small_size_ * 100) / area_size,
753 static_cast<int>(sizes.medium_size_),
754 static_cast<double>(sizes.medium_size_ * 100) / area_size,
755 static_cast<int>(sizes.large_size_),
756 static_cast<double>(sizes.large_size_ * 100) / area_size,
757 static_cast<int>(sizes.huge_size_),
758 static_cast<double>(sizes.huge_size_ * 100) / area_size,
759 (ratio > ratio_threshold) ? "[fragmented]" : "");
762 if (FLAG_always_compact && sizes.Total() != area_size) {
766 if (ratio <= ratio_threshold) return 0; // Not fragmented.
768 return static_cast<int>(ratio - ratio_threshold);
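
// A small worked example of the heuristic above, using made-up numbers and
// mirroring only the non-code-space branch; this sketch is illustrative and
// not part of V8. With 100,000 bytes of small free-list entries and 50,000
// bytes of medium entries on a 1,000,000-byte page area:
//   ratio = (100,000 * 5 + 50,000) * 100 / 1,000,000 = 55
// which exceeds the threshold of 15, so the page would be reported as
// fragmented with an estimate of 55 - 15 = 40.
namespace fragmentation_sketch {

inline int SketchFragmentation(intptr_t small_size, intptr_t medium_size,
                               intptr_t area_size) {
  intptr_t ratio = (small_size * 5 + medium_size) * 100 / area_size;
  const intptr_t kRatioThreshold = 15;
  if (ratio <= kRatioThreshold) return 0;  // Not fragmented.
  return static_cast<int>(ratio - kRatioThreshold);
}

}  // namespace fragmentation_sketch
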
772 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
773 DCHECK(space->identity() == OLD_POINTER_SPACE ||
774 space->identity() == OLD_DATA_SPACE ||
775 space->identity() == CODE_SPACE);
777 static const int kMaxMaxEvacuationCandidates = 1000;
778 int number_of_pages = space->CountTotalPages();
779 int max_evacuation_candidates =
780 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
782 if (FLAG_stress_compaction || FLAG_always_compact) {
783 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
788 Candidate() : fragmentation_(0), page_(NULL) {}
789 Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
791 int fragmentation() { return fragmentation_; }
792 Page* page() { return page_; }
799 enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
801 CompactionMode mode = COMPACT_FREE_LISTS;
803 intptr_t reserved = number_of_pages * space->AreaSize();
804 intptr_t over_reserved = reserved - space->SizeOfObjects();
805 static const intptr_t kFreenessThreshold = 50;
807 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
808 // If reduction of memory footprint was requested, we are aggressive
809 // about choosing pages to free. We expect that half-empty pages
810 // are easier to compact so slightly bump the limit.
811 mode = REDUCE_MEMORY_FOOTPRINT;
812 max_evacuation_candidates += 2;
816 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
817 // If over-usage is very high (more than a third of the space), we
818 // try to free all mostly empty pages. We expect that almost empty
819 // pages are even easier to compact so bump the limit even more.
820 mode = REDUCE_MEMORY_FOOTPRINT;
821 max_evacuation_candidates *= 2;
824 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
826 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
827 "evacuation candidate limit: %d\n",
828 static_cast<double>(over_reserved) / MB,
829 static_cast<double>(reserved) / MB,
830 static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
833 intptr_t estimated_release = 0;
835 Candidate candidates[kMaxMaxEvacuationCandidates];
837 max_evacuation_candidates =
838 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
841 int fragmentation = 0;
842 Candidate* least = NULL;
844 PageIterator it(space);
845 if (it.has_next()) it.next(); // Never compact the first page.
847 while (it.has_next()) {
849 p->ClearEvacuationCandidate();
851 if (FLAG_stress_compaction) {
852 unsigned int counter = space->heap()->ms_count();
853 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
854 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
855 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
856 // Don't try to release too many pages.
857 if (estimated_release >= over_reserved) {
861 intptr_t free_bytes = 0;
863 if (!p->WasSwept()) {
864 free_bytes = (p->area_size() - p->LiveBytes());
866 PagedSpace::SizeStats sizes;
867 space->ObtainFreeListStatistics(p, &sizes);
868 free_bytes = sizes.Total();
871 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
873 if (free_pct >= kFreenessThreshold) {
874 estimated_release += free_bytes;
875 fragmentation = free_pct;
880 if (FLAG_trace_fragmentation) {
881 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
882 AllocationSpaceName(space->identity()),
883 static_cast<int>(free_bytes),
884 static_cast<double>(free_bytes * 100) / p->area_size(),
885 (fragmentation > 0) ? "[fragmented]" : "");
888 fragmentation = FreeListFragmentation(space, p);
891 if (fragmentation != 0) {
892 if (count < max_evacuation_candidates) {
893 candidates[count++] = Candidate(fragmentation, p);
896 for (int i = 0; i < max_evacuation_candidates; i++) {
898 candidates[i].fragmentation() < least->fragmentation()) {
899 least = candidates + i;
903 if (least->fragmentation() < fragmentation) {
904 *least = Candidate(fragmentation, p);
911 for (int i = 0; i < count; i++) {
912 AddEvacuationCandidate(candidates[i].page());
915 if (count > 0 && FLAG_trace_fragmentation) {
916 PrintF("Collected %d evacuation candidates for space %s\n", count,
917 AllocationSpaceName(space->identity()));
922 void MarkCompactCollector::AbortCompaction() {
924 int npages = evacuation_candidates_.length();
925 for (int i = 0; i < npages; i++) {
926 Page* p = evacuation_candidates_[i];
927 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
928 p->ClearEvacuationCandidate();
929 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
932 evacuation_candidates_.Rewind(0);
933 invalidated_code_.Rewind(0);
935 DCHECK_EQ(0, evacuation_candidates_.length());
939 void MarkCompactCollector::Prepare() {
940 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
943 DCHECK(state_ == IDLE);
947 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
949 if (sweeping_in_progress()) {
950 // Instead of waiting we could also abort the sweeper threads here.
951 EnsureSweepingCompleted();
954 // Clear marking bits if incremental marking is aborted.
955 if (was_marked_incrementally_ && abort_incremental_marking_) {
956 heap()->incremental_marking()->Abort();
958 AbortWeakCollections();
960 was_marked_incrementally_ = false;
963   // Don't start compaction if we are in the middle of an incremental
964   // marking cycle, because in that case we have not collected any slots.
965 if (!FLAG_never_compact && !was_marked_incrementally_) {
966 StartCompaction(NON_INCREMENTAL_COMPACTION);
969 PagedSpaces spaces(heap());
970 for (PagedSpace* space = spaces.next(); space != NULL;
971 space = spaces.next()) {
972 space->PrepareForMarkCompact();
976 if (!was_marked_incrementally_ && FLAG_verify_heap) {
977 VerifyMarkbitsAreClean();
983 void MarkCompactCollector::Finish() {
985 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
988 // The stub cache is not traversed during GC; clear the cache to
989 // force lazy re-initialization of it. This must be done after the
990 // GC, because it relies on the new address of certain old space
991 // objects (empty string, illegal builtin).
992 isolate()->stub_cache()->Clear();
994 if (have_code_to_deoptimize_) {
995 // Some code objects were marked for deoptimization during the GC.
996 Deoptimizer::DeoptimizeMarkedCode(isolate());
997 have_code_to_deoptimize_ = false;
1002 // -------------------------------------------------------------------------
1003 // Phase 1: tracing and marking live objects.
1004 // before: all objects are in normal state.
1005 // after: a live object's map pointer is marked as '00'.
1007 // Marking all live objects in the heap as part of mark-sweep or mark-compact
1008 // collection. Before marking, all objects are in their normal state. After
1009 // marking, live objects' map pointers are marked indicating that the object
1010 // has been found reachable.
1012 // The marking algorithm is a (mostly) depth-first (because of possible stack
1013 // overflow) traversal of the graph of objects reachable from the roots. It
1014 // uses an explicit stack of pointers rather than recursion. The young
1015 // generation's inactive ('from') space is used as a marking stack. The
1016 // objects in the marking stack are the ones that have been reached and marked
1017 // but their children have not yet been visited.
1019 // The marking stack can overflow during traversal. In that case, we set an
1020 // overflow flag. When the overflow flag is set, we continue marking objects
1021 // reachable from the objects on the marking stack, but no longer push them on
1022 // the marking stack. Instead, we mark them as both marked and overflowed.
1023 // When the stack is in the overflowed state, objects marked as overflowed
1024 // have been reached and marked but their children have not been visited yet.
1025 // After emptying the marking stack, we clear the overflow flag and traverse
1026 // the heap looking for objects marked as overflowed, push them on the stack,
1027 // and continue with marking. This process repeats until all reachable
1028 // objects have been marked.
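
// The sketch below models the overflow scheme described above with a tiny,
// fixed-capacity marking stack. It is illustrative only and not part of V8;
// the real collector uses the marking deque, the per-object mark bits and a
// heap rescan to re-discover overflowed objects.
namespace marking_overflow_sketch {

struct SketchNode {
  bool marked;
  bool overflowed;
  SketchNode* left;
  SketchNode* right;
};

struct SketchStack {
  static const int kCapacity = 4;
  SketchNode* entries[kCapacity];
  int top;
  bool overflowed;
};

// Marks a node and pushes it; if the stack is full, the node is only flagged
// as overflowed so that a later rescan of the heap can push it again.
inline void SketchMark(SketchStack* stack, SketchNode* node) {
  if (node == NULL || node->marked) return;
  node->marked = true;
  if (stack->top < SketchStack::kCapacity) {
    stack->entries[stack->top++] = node;
  } else {
    stack->overflowed = true;
    node->overflowed = true;
  }
}

// Drains the stack, marking the children of each popped node. In the real
// collector this alternates with a heap scan for overflowed objects until
// everything reachable has been marked.
inline void SketchEmptyStack(SketchStack* stack) {
  while (stack->top > 0) {
    SketchNode* node = stack->entries[--stack->top];
    SketchMark(stack, node->left);
    SketchMark(stack, node->right);
  }
}

}  // namespace marking_overflow_sketch
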
1030 void CodeFlusher::ProcessJSFunctionCandidates() {
1031 Code* lazy_compile =
1032 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1033 Object* undefined = isolate_->heap()->undefined_value();
1035 JSFunction* candidate = jsfunction_candidates_head_;
1036 JSFunction* next_candidate;
1037 while (candidate != NULL) {
1038 next_candidate = GetNextCandidate(candidate);
1039 ClearNextCandidate(candidate, undefined);
1041 SharedFunctionInfo* shared = candidate->shared();
1043 Code* code = shared->code();
1044 MarkBit code_mark = Marking::MarkBitFrom(code);
1045 if (!code_mark.Get()) {
1046 if (FLAG_trace_code_flushing && shared->is_compiled()) {
1047 PrintF("[code-flushing clears: ");
1048 shared->ShortPrint();
1049 PrintF(" - age: %d]\n", code->GetAge());
1051 shared->set_code(lazy_compile);
1052 candidate->set_code(lazy_compile);
1054 candidate->set_code(code);
1057 // We are in the middle of a GC cycle so the write barrier in the code
1058 // setter did not record the slot update and we have to do that manually.
1059 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1060 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1061 isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
1064 Object** shared_code_slot =
1065 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1066 isolate_->heap()->mark_compact_collector()->RecordSlot(
1067 shared_code_slot, shared_code_slot, *shared_code_slot);
1069 candidate = next_candidate;
1072 jsfunction_candidates_head_ = NULL;
1076 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1077 Code* lazy_compile =
1078 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1080 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1081 SharedFunctionInfo* next_candidate;
1082 while (candidate != NULL) {
1083 next_candidate = GetNextCandidate(candidate);
1084 ClearNextCandidate(candidate);
1086 Code* code = candidate->code();
1087 MarkBit code_mark = Marking::MarkBitFrom(code);
1088 if (!code_mark.Get()) {
1089 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1090 PrintF("[code-flushing clears: ");
1091 candidate->ShortPrint();
1092 PrintF(" - age: %d]\n", code->GetAge());
1094 candidate->set_code(lazy_compile);
1097 Object** code_slot =
1098 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1099 isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
1102 candidate = next_candidate;
1105 shared_function_info_candidates_head_ = NULL;
1109 void CodeFlusher::ProcessOptimizedCodeMaps() {
1110 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1112 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1113 SharedFunctionInfo* next_holder;
1115 while (holder != NULL) {
1116 next_holder = GetNextCodeMap(holder);
1117 ClearNextCodeMap(holder);
1119 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1120 int new_length = SharedFunctionInfo::kEntriesStart;
1121 int old_length = code_map->length();
1122 for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1123 i += SharedFunctionInfo::kEntryLength) {
1125 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1126 if (!Marking::MarkBitFrom(code).Get()) continue;
1128 // Move every slot in the entry.
1129 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1130 int dst_index = new_length++;
1131 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1132 Object* object = code_map->get(i + j);
1133 code_map->set(dst_index, object);
1134 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1135 DCHECK(object->IsSmi());
1138 Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
1139 isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
1145 // Trim the optimized code map if entries have been removed.
1146 if (new_length < old_length) {
1147 holder->TrimOptimizedCodeMap(old_length - new_length);
1150 holder = next_holder;
1153 optimized_code_map_holder_head_ = NULL;
1157 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1158 // Make sure previous flushing decisions are revisited.
1159 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1161 if (FLAG_trace_code_flushing) {
1162 PrintF("[code-flushing abandons function-info: ");
1163 shared_info->ShortPrint();
1167 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1168 SharedFunctionInfo* next_candidate;
1169 if (candidate == shared_info) {
1170 next_candidate = GetNextCandidate(shared_info);
1171 shared_function_info_candidates_head_ = next_candidate;
1172 ClearNextCandidate(shared_info);
1174 while (candidate != NULL) {
1175 next_candidate = GetNextCandidate(candidate);
1177 if (next_candidate == shared_info) {
1178 next_candidate = GetNextCandidate(shared_info);
1179 SetNextCandidate(candidate, next_candidate);
1180 ClearNextCandidate(shared_info);
1184 candidate = next_candidate;
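
// EvictCandidate above unlinks a single element from the candidate list that
// is threaded through the objects themselves. The sketch below is a minimal,
// self-contained model of that unlink operation; it is not part of V8 and the
// SketchCandidate type is invented for illustration.
namespace candidate_list_sketch {

struct SketchCandidate {
  SketchCandidate* next;
};

// Removes |victim| from the singly-linked list rooted at |*head|, clearing its
// link, and leaves the list unchanged if |victim| is not present.
inline void SketchEvict(SketchCandidate** head, SketchCandidate* victim) {
  if (*head == victim) {
    *head = victim->next;
    victim->next = NULL;
    return;
  }
  for (SketchCandidate* current = *head; current != NULL;
       current = current->next) {
    if (current->next == victim) {
      current->next = victim->next;
      victim->next = NULL;
      return;
    }
  }
}

}  // namespace candidate_list_sketch
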
1190 void CodeFlusher::EvictCandidate(JSFunction* function) {
1191 DCHECK(!function->next_function_link()->IsUndefined());
1192 Object* undefined = isolate_->heap()->undefined_value();
1194 // Make sure previous flushing decisions are revisited.
1195 isolate_->heap()->incremental_marking()->RecordWrites(function);
1196 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1198 if (FLAG_trace_code_flushing) {
1199 PrintF("[code-flushing abandons closure: ");
1200 function->shared()->ShortPrint();
1204 JSFunction* candidate = jsfunction_candidates_head_;
1205 JSFunction* next_candidate;
1206 if (candidate == function) {
1207 next_candidate = GetNextCandidate(function);
1208 jsfunction_candidates_head_ = next_candidate;
1209 ClearNextCandidate(function, undefined);
1211 while (candidate != NULL) {
1212 next_candidate = GetNextCandidate(candidate);
1214 if (next_candidate == function) {
1215 next_candidate = GetNextCandidate(function);
1216 SetNextCandidate(candidate, next_candidate);
1217 ClearNextCandidate(function, undefined);
1221 candidate = next_candidate;
1227 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1228 DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
1229 ->get(SharedFunctionInfo::kNextMapIndex)
1232 // Make sure previous flushing decisions are revisited.
1233 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1235 if (FLAG_trace_code_flushing) {
1236 PrintF("[code-flushing abandons code-map: ");
1237 code_map_holder->ShortPrint();
1241 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1242 SharedFunctionInfo* next_holder;
1243 if (holder == code_map_holder) {
1244 next_holder = GetNextCodeMap(code_map_holder);
1245 optimized_code_map_holder_head_ = next_holder;
1246 ClearNextCodeMap(code_map_holder);
1248 while (holder != NULL) {
1249 next_holder = GetNextCodeMap(holder);
1251 if (next_holder == code_map_holder) {
1252 next_holder = GetNextCodeMap(code_map_holder);
1253 SetNextCodeMap(holder, next_holder);
1254 ClearNextCodeMap(code_map_holder);
1258 holder = next_holder;
1264 void CodeFlusher::EvictJSFunctionCandidates() {
1265 JSFunction* candidate = jsfunction_candidates_head_;
1266 JSFunction* next_candidate;
1267 while (candidate != NULL) {
1268 next_candidate = GetNextCandidate(candidate);
1269 EvictCandidate(candidate);
1270 candidate = next_candidate;
1272 DCHECK(jsfunction_candidates_head_ == NULL);
1276 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1277 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1278 SharedFunctionInfo* next_candidate;
1279 while (candidate != NULL) {
1280 next_candidate = GetNextCandidate(candidate);
1281 EvictCandidate(candidate);
1282 candidate = next_candidate;
1284 DCHECK(shared_function_info_candidates_head_ == NULL);
1288 void CodeFlusher::EvictOptimizedCodeMaps() {
1289 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1290 SharedFunctionInfo* next_holder;
1291 while (holder != NULL) {
1292 next_holder = GetNextCodeMap(holder);
1293 EvictOptimizedCodeMap(holder);
1294 holder = next_holder;
1296 DCHECK(optimized_code_map_holder_head_ == NULL);
1300 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1301 Heap* heap = isolate_->heap();
1303 JSFunction** slot = &jsfunction_candidates_head_;
1304 JSFunction* candidate = jsfunction_candidates_head_;
1305 while (candidate != NULL) {
1306 if (heap->InFromSpace(candidate)) {
1307 v->VisitPointer(reinterpret_cast<Object**>(slot));
1309 candidate = GetNextCandidate(*slot);
1310 slot = GetNextCandidateSlot(*slot);
1315 MarkCompactCollector::~MarkCompactCollector() {
1316 if (code_flusher_ != NULL) {
1317 delete code_flusher_;
1318 code_flusher_ = NULL;
1323 static inline HeapObject* ShortCircuitConsString(Object** p) {
1324 // Optimization: If the heap object pointed to by p is a non-internalized
1325 // cons string whose right substring is HEAP->empty_string, update
1326 // it in place to its left substring. Return the updated value.
1328 // Here we assume that if we change *p, we replace it with a heap object
1329 // (i.e., the left substring of a cons string is always a heap object).
1331 // The check performed is:
1332 // object->IsConsString() && !object->IsInternalizedString() &&
1333 // (ConsString::cast(object)->second() == HEAP->empty_string())
1334   // except the maps for the object and its possible substrings might be
1335   // marked.
1336 HeapObject* object = HeapObject::cast(*p);
1337 if (!FLAG_clever_optimizations) return object;
1338 Map* map = object->map();
1339 InstanceType type = map->instance_type();
1340 if (!IsShortcutCandidate(type)) return object;
1342 Object* second = reinterpret_cast<ConsString*>(object)->second();
1343 Heap* heap = map->GetHeap();
1344 if (second != heap->empty_string()) {
1348 // Since we don't have the object's start, it is impossible to update the
1349 // page dirty marks. Therefore, we only replace the string with its left
1350 // substring when page dirty marks do not change.
1351 Object* first = reinterpret_cast<ConsString*>(object)->first();
1352 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1355 return HeapObject::cast(first);
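
// A minimal model of the short-circuiting above, leaving out the map,
// internalization and new-space checks that the real code performs. This is
// illustrative only and not part of V8; the SketchString type is invented.
namespace cons_string_sketch {

struct SketchString {
  bool is_cons;          // True for cons strings, false for flat strings.
  SketchString* first;   // Left part of a cons string.
  SketchString* second;  // Right part of a cons string.
};

// If *p is a cons string whose right part is the designated empty string,
// rewrite the slot in place to point directly at the left part.
inline SketchString* SketchShortCircuit(SketchString** p, SketchString* empty) {
  SketchString* object = *p;
  if (!object->is_cons) return object;
  if (object->second != empty) return object;
  *p = object->first;
  return object->first;
}

}  // namespace cons_string_sketch
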
1359 class MarkCompactMarkingVisitor
1360 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1362 static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
1365 static void ObjectStatsCountFixedArray(
1366 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1367 FixedArraySubInstanceType dictionary_type);
1369 template <MarkCompactMarkingVisitor::VisitorId id>
1370 class ObjectStatsTracker {
1372 static inline void Visit(Map* map, HeapObject* obj);
1375 static void Initialize();
1377 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1378 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1381 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1382 // Mark all objects pointed to in [start, end).
1383 const int kMinRangeForMarkingRecursion = 64;
1384 if (end - start >= kMinRangeForMarkingRecursion) {
1385 if (VisitUnmarkedObjects(heap, start, end)) return;
1386 // We are close to a stack overflow, so just mark the objects.
1388 MarkCompactCollector* collector = heap->mark_compact_collector();
1389 for (Object** p = start; p < end; p++) {
1390 MarkObjectByPointer(collector, start, p);
1394 // Marks the object black and pushes it on the marking stack.
1395 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1396 MarkBit mark = Marking::MarkBitFrom(object);
1397 heap->mark_compact_collector()->MarkObject(object, mark);
1400 // Marks the object black without pushing it on the marking stack.
1401 // Returns true if object needed marking and false otherwise.
1402 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1403 MarkBit mark_bit = Marking::MarkBitFrom(object);
1404 if (!mark_bit.Get()) {
1405 heap->mark_compact_collector()->SetMark(object, mark_bit);
1411 // Mark object pointed to by p.
1412 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1413 Object** anchor_slot, Object** p)) {
1414 if (!(*p)->IsHeapObject()) return;
1415 HeapObject* object = ShortCircuitConsString(p);
1416 collector->RecordSlot(anchor_slot, p, object);
1417 MarkBit mark = Marking::MarkBitFrom(object);
1418 collector->MarkObject(object, mark);
1422 // Visit an unmarked object.
1423 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1426 DCHECK(collector->heap()->Contains(obj));
1427 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1429 Map* map = obj->map();
1430 Heap* heap = obj->GetHeap();
1431 MarkBit mark = Marking::MarkBitFrom(obj);
1432 heap->mark_compact_collector()->SetMark(obj, mark);
1433 // Mark the map pointer and the body.
1434 MarkBit map_mark = Marking::MarkBitFrom(map);
1435 heap->mark_compact_collector()->MarkObject(map, map_mark);
1436 IterateBody(map, obj);
1439 // Visit all unmarked objects pointed to by [start, end).
1440 // Returns false if the operation fails (lack of stack space).
1441 INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
1443     // Return false if we are close to the stack limit.
1444 StackLimitCheck check(heap->isolate());
1445 if (check.HasOverflowed()) return false;
1447 MarkCompactCollector* collector = heap->mark_compact_collector();
1448 // Visit the unmarked objects.
1449 for (Object** p = start; p < end; p++) {
1451 if (!o->IsHeapObject()) continue;
1452 collector->RecordSlot(start, p, o);
1453 HeapObject* obj = HeapObject::cast(o);
1454 MarkBit mark = Marking::MarkBitFrom(obj);
1455 if (mark.Get()) continue;
1456 VisitUnmarkedObject(collector, obj);
1463 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1465 // Code flushing support.
1467 static const int kRegExpCodeThreshold = 5;
1469 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1471 // Make sure that the fixed array is in fact initialized on the RegExp.
1472 // We could potentially trigger a GC when initializing the RegExp.
1473 if (HeapObject::cast(re->data())->map()->instance_type() !=
1477 // Make sure this is a RegExp that actually contains code.
1478 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1480 Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1481 if (!code->IsSmi() &&
1482 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1483 // Save a copy that can be reinstated if we need the code again.
1484 re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1486       // Saving a copy might create a pointer into a compaction candidate that
1487       // was not observed by the marker. This might happen if the JSRegExp data
1488       // was marked through the compilation cache before the marker reached the
1489       // JSRegExp object.
1490 FixedArray* data = FixedArray::cast(re->data());
1491 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1492 heap->mark_compact_collector()->RecordSlot(slot, slot, code);
1494 // Set a number in the 0-255 range to guarantee no smi overflow.
1495 re->SetDataAt(JSRegExp::code_index(is_ascii),
1496 Smi::FromInt(heap->sweep_generation() & 0xff));
1497 } else if (code->IsSmi()) {
1498 int value = Smi::cast(code)->value();
1499 // The regexp has not been compiled yet or there was a compilation error.
1500 if (value == JSRegExp::kUninitializedValue ||
1501 value == JSRegExp::kCompilationErrorValue) {
1505 // Check if we should flush now.
1506 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1507 re->SetDataAt(JSRegExp::code_index(is_ascii),
1508 Smi::FromInt(JSRegExp::kUninitializedValue));
1509 re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1510 Smi::FromInt(JSRegExp::kUninitializedValue));
1516   // Works by setting the current sweep_generation (as a smi) in the
1517   // code-object slot of the RegExp's data array, and by keeping a copy
1518   // around that can be reinstated if we reuse the RegExp before flushing.
1519   // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
1520   // we flush the code.
1521 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1522 Heap* heap = map->GetHeap();
1523 MarkCompactCollector* collector = heap->mark_compact_collector();
1524 if (!collector->is_code_flushing_enabled()) {
1525 VisitJSRegExp(map, object);
1528 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1529 // Flush code or set age on both ASCII and two byte code.
1530 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1531 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1532 // Visit the fields of the RegExp, including the updated FixedArray.
1533 VisitJSRegExp(map, object);
1536 static VisitorDispatchTable<Callback> non_count_table_;
1540 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1541 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1542 FixedArraySubInstanceType dictionary_type) {
1543 Heap* heap = fixed_array->map()->GetHeap();
1544 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1545 fixed_array->map() != heap->fixed_double_array_map() &&
1546 fixed_array != heap->empty_fixed_array()) {
1547 if (fixed_array->IsDictionary()) {
1548 heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
1550 heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
1556 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1557 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1558 Heap* heap = map->GetHeap();
1559 int object_size = obj->Size();
1560 heap->RecordObjectStats(map->instance_type(), object_size);
1561 non_count_table_.GetVisitorById(id)(map, obj);
1562 if (obj->IsJSObject()) {
1563 JSObject* object = JSObject::cast(obj);
1564 ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
1565 FAST_ELEMENTS_SUB_TYPE);
1566 ObjectStatsCountFixedArray(object->properties(),
1567 DICTIONARY_PROPERTIES_SUB_TYPE,
1568 FAST_PROPERTIES_SUB_TYPE);
1573 template <MarkCompactMarkingVisitor::VisitorId id>
1574 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
1576 ObjectStatsVisitBase(id, map, obj);
1581 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1582 MarkCompactMarkingVisitor::kVisitMap> {
1584 static inline void Visit(Map* map, HeapObject* obj) {
1585 Heap* heap = map->GetHeap();
1586 Map* map_obj = Map::cast(obj);
1587 DCHECK(map->instance_type() == MAP_TYPE);
1588 DescriptorArray* array = map_obj->instance_descriptors();
1589 if (map_obj->owns_descriptors() &&
1590 array != heap->empty_descriptor_array()) {
1591 int fixed_array_size = array->Size();
1592 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1595 if (map_obj->HasTransitionArray()) {
1596 int fixed_array_size = map_obj->transitions()->Size();
1597 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1600 if (map_obj->has_code_cache()) {
1601 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1602 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1603 cache->default_cache()->Size());
1604 if (!cache->normal_type_cache()->IsUndefined()) {
1605 heap->RecordFixedArraySubTypeStats(
1606 MAP_CODE_CACHE_SUB_TYPE,
1607 FixedArray::cast(cache->normal_type_cache())->Size());
1610 ObjectStatsVisitBase(kVisitMap, map, obj);
1616 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1617 MarkCompactMarkingVisitor::kVisitCode> {
1619 static inline void Visit(Map* map, HeapObject* obj) {
1620 Heap* heap = map->GetHeap();
1621 int object_size = obj->Size();
1622 DCHECK(map->instance_type() == CODE_TYPE);
1623 Code* code_obj = Code::cast(obj);
1624 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1626 ObjectStatsVisitBase(kVisitCode, map, obj);
1632 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1633 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1635 static inline void Visit(Map* map, HeapObject* obj) {
1636 Heap* heap = map->GetHeap();
1637 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1638 if (sfi->scope_info() != heap->empty_fixed_array()) {
1639 heap->RecordFixedArraySubTypeStats(
1640 SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
1642 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1648 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1649 MarkCompactMarkingVisitor::kVisitFixedArray> {
1651 static inline void Visit(Map* map, HeapObject* obj) {
1652 Heap* heap = map->GetHeap();
1653 FixedArray* fixed_array = FixedArray::cast(obj);
1654 if (fixed_array == heap->string_table()) {
1655 heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
1656 fixed_array->Size());
1658 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1663 void MarkCompactMarkingVisitor::Initialize() {
1664 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1666 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1668 if (FLAG_track_gc_object_stats) {
1669 // Copy the visitor table to make call-through possible.
1670 non_count_table_.CopyFrom(&table_);
1671 #define VISITOR_ID_COUNT_FUNCTION(id) \
1672 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1673 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1674 #undef VISITOR_ID_COUNT_FUNCTION
1679 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1680 MarkCompactMarkingVisitor::non_count_table_;
1683 class CodeMarkingVisitor : public ThreadVisitor {
1685 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1686 : collector_(collector) {}
1688 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1689 collector_->PrepareThreadForCodeFlushing(isolate, top);
1693 MarkCompactCollector* collector_;
1697 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1699 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1700 : collector_(collector) {}
1702 void VisitPointers(Object** start, Object** end) {
1703 for (Object** p = start; p < end; p++) VisitPointer(p);
1706 void VisitPointer(Object** slot) {
1707 Object* obj = *slot;
1708 if (obj->IsSharedFunctionInfo()) {
1709 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1710 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1711 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1712 collector_->MarkObject(shared->code(), code_mark);
1713 collector_->MarkObject(shared, shared_mark);
1718 MarkCompactCollector* collector_;
1722 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1723 ThreadLocalTop* top) {
1724 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1725     // Note: for a frame that has a pending lazy deoptimization,
1726     // StackFrame::unchecked_code will return a non-optimized code object for
1727     // the outermost function, and StackFrame::LookupCode will return the
1728     // actual optimized code object.
1729 StackFrame* frame = it.frame();
1730 Code* code = frame->unchecked_code();
1731 MarkBit code_mark = Marking::MarkBitFrom(code);
1732 MarkObject(code, code_mark);
1733 if (frame->is_optimized()) {
1734 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1735 frame->LookupCode());
1741 void MarkCompactCollector::PrepareForCodeFlushing() {
1742 // Enable code flushing for non-incremental cycles.
1743 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1744 EnableCodeFlushing(!was_marked_incrementally_);
1747 // If code flushing is disabled, there is no need to prepare for it.
1748 if (!is_code_flushing_enabled()) return;
1750   // Ensure that the empty descriptor array is marked. MarkDescriptorArray
1751   // relies on it being marked before any other descriptor array.
1752 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1753 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1754 MarkObject(descriptor_array, descriptor_array_mark);
1756 // Make sure we are not referencing the code from the stack.
1757 DCHECK(this == heap()->mark_compact_collector());
1758 PrepareThreadForCodeFlushing(heap()->isolate(),
1759 heap()->isolate()->thread_local_top());
1761 // Iterate the archived stacks in all threads to check if
1762 // the code is referenced.
1763 CodeMarkingVisitor code_marking_visitor(this);
1764 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1765 &code_marking_visitor);
1767 SharedFunctionInfoMarkingVisitor visitor(this);
1768 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1769 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1771 ProcessMarkingDeque();
1775 // Visitor class for marking heap roots.
1776 class RootMarkingVisitor : public ObjectVisitor {
1778 explicit RootMarkingVisitor(Heap* heap)
1779 : collector_(heap->mark_compact_collector()) {}
1781 void VisitPointer(Object** p) { MarkObjectByPointer(p); }
1783 void VisitPointers(Object** start, Object** end) {
1784 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1787 // Skip the weak next code link in a code object, which is visited in
1788 // ProcessTopOptimizedFrame.
1789 void VisitNextCodeLink(Object** p) {}
1792 void MarkObjectByPointer(Object** p) {
1793 if (!(*p)->IsHeapObject()) return;
1795 // Replace flat cons strings in place.
1796 HeapObject* object = ShortCircuitConsString(p);
1797 MarkBit mark_bit = Marking::MarkBitFrom(object);
1798 if (mark_bit.Get()) return;
1800 Map* map = object->map();
1802 collector_->SetMark(object, mark_bit);
1804 // Mark the map pointer and body, and push them on the marking stack.
1805 MarkBit map_mark = Marking::MarkBitFrom(map);
1806 collector_->MarkObject(map, map_mark);
1807 MarkCompactMarkingVisitor::IterateBody(map, object);
1809 // Mark all the objects reachable from the map and body. May leave
1810 // overflowed objects in the heap.
1811 collector_->EmptyMarkingDeque();
1814 MarkCompactCollector* collector_;
1818 // Helper class for pruning the string table.
1819 template <bool finalize_external_strings>
1820 class StringTableCleaner : public ObjectVisitor {
1822 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1824 virtual void VisitPointers(Object** start, Object** end) {
1825 // Visit all HeapObject pointers in [start, end).
1826 for (Object** p = start; p < end; p++) {
1828 if (o->IsHeapObject() &&
1829 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1830 if (finalize_external_strings) {
1831 DCHECK(o->IsExternalString());
1832 heap_->FinalizeExternalString(String::cast(*p));
1834 pointers_removed_++;
1836 // Set the entry to the_hole_value (as deleted).
1837 *p = heap_->the_hole_value();
1842 int PointersRemoved() {
1843 DCHECK(!finalize_external_strings);
1844 return pointers_removed_;
1849 int pointers_removed_;
1853 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1854 typedef StringTableCleaner<true> ExternalStringTableCleaner;
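
// The cleaners above prune dead entries by overwriting them with the hole
// value and counting the removals so the table can be shrunk afterwards. A
// minimal, self-contained model of that pattern (not V8 code; the types are
// invented for illustration):
namespace table_prune_sketch {

struct SketchEntry {
  bool live;
};

// Overwrites dead entries with |hole| and returns the number removed,
// mirroring what StringTableCleaner::VisitPointers does for the string table.
inline int SketchPrune(SketchEntry** entries, int length, SketchEntry* hole) {
  int removed = 0;
  for (int i = 0; i < length; i++) {
    SketchEntry* entry = entries[i];
    if (entry != hole && !entry->live) {
      entries[i] = hole;
      removed++;
    }
  }
  return removed;
}

}  // namespace table_prune_sketch
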
1857 // Implementation of WeakObjectRetainer for mark compact GCs. All marked
1858 // objects are retained.
1859 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1861 virtual Object* RetainAs(Object* object) {
1862 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1864 } else if (object->IsAllocationSite() &&
1865 !(AllocationSite::cast(object)->IsZombie())) {
1866 // "dead" AllocationSites need to live long enough for a traversal of new
1867 // space. These sites get a one-time reprieve.
1868 AllocationSite* site = AllocationSite::cast(object);
1870 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1879 // Fill the marking stack with overflowed objects returned by the given
1880 // iterator. Stop when the marking stack is filled or the end of the space
1881 // is reached, whichever comes first.
1883 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1884 MarkingDeque* marking_deque,
1886 // The caller should ensure that the marking stack is initially not full,
1887 // so that we don't waste effort pointlessly scanning for objects.
1888 DCHECK(!marking_deque->IsFull());
1890 Map* filler_map = heap->one_pointer_filler_map();
1891 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1892 MarkBit markbit = Marking::MarkBitFrom(object);
1893 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1894 Marking::GreyToBlack(markbit);
1895 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1896 marking_deque->PushBlack(object);
1897 if (marking_deque->IsFull()) return;
1903 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1906 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1908 DCHECK(!marking_deque->IsFull());
1909 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1910 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1911 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1912 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1914 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1915 Address cell_base = it.CurrentCellBase();
1916 MarkBit::CellType* cell = it.CurrentCell();
1918 const MarkBit::CellType current_cell = *cell;
1919 if (current_cell == 0) continue;
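// A note on the bit twiddling below: with the bit patterns asserted above, a
// grey object has both its own mark bit and the following bit set, so ANDing a
// cell with itself shifted right by one yields a mask whose set bits are
// exactly the grey object starts in that cell. For the last bit of a cell the
// "following" bit lives in the next cell, which is why the low bit of the next
// cell is shifted in. Illustration (low bits on the right): current_cell =
// ...0110 marks a grey object at bit 1, and ...0110 & (...0110 >> 1) = ...0010
// recovers exactly that start bit.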
1921 MarkBit::CellType grey_objects;
1923 const MarkBit::CellType next_cell = *(cell + 1);
1924 grey_objects = current_cell & ((current_cell >> 1) |
1925 (next_cell << (Bitmap::kBitsPerCell - 1)));
1927 grey_objects = current_cell & (current_cell >> 1);
1931 while (grey_objects != 0) {
1932 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1933 grey_objects >>= trailing_zeros;
1934 offset += trailing_zeros;
1935 MarkBit markbit(cell, 1 << offset, false);
1936 DCHECK(Marking::IsGrey(markbit));
1937 Marking::GreyToBlack(markbit);
1938 Address addr = cell_base + offset * kPointerSize;
1939 HeapObject* object = HeapObject::FromAddress(addr);
1940 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1941 marking_deque->PushBlack(object);
1942 if (marking_deque->IsFull()) return;
1947 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1952 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1953 NewSpace* new_space, NewSpacePage* p) {
1954 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1955 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1956 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1957 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1959 MarkBit::CellType* cells = p->markbits()->cells();
1960 int survivors_size = 0;
1962 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1963 Address cell_base = it.CurrentCellBase();
1964 MarkBit::CellType* cell = it.CurrentCell();
1966 MarkBit::CellType current_cell = *cell;
1967 if (current_cell == 0) continue;
1970 while (current_cell != 0) {
1971 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
1972 current_cell >>= trailing_zeros;
1973 offset += trailing_zeros;
1974 Address address = cell_base + offset * kPointerSize;
1975 HeapObject* object = HeapObject::FromAddress(address);
1977 int size = object->Size();
1978 survivors_size += size;
1980 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
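// Survivors are either promoted to old space (when heap()->ShouldBePromoted()
// approves, typically because the object lies below the new-space age mark and
// has therefore already survived a scavenge) or copied within to-space below.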
1985 // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1986 if (heap()->ShouldBePromoted(object->address(), size) &&
1987 TryPromoteObject(object, size)) {
1991 AllocationResult allocation = new_space->AllocateRaw(size);
1992 if (allocation.IsRetry()) {
1993 if (!new_space->AddFreshPage()) {
1994 // Shouldn't happen. We are sweeping linearly, and to-space
1995 // has the same number of pages as from-space, so there is always room.
1999 allocation = new_space->AllocateRaw(size);
2000 DCHECK(!allocation.IsRetry());
2002 Object* target = allocation.ToObjectChecked();
2004 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
2005 heap()->IncrementSemiSpaceCopiedObjectSize(size);
2009 return survivors_size;
2013 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
2014 PagedSpace* space) {
2015 PageIterator it(space);
2016 while (it.has_next()) {
2017 Page* p = it.next();
2018 DiscoverGreyObjectsOnPage(marking_deque, p);
2019 if (marking_deque->IsFull()) return;
2024 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2025 MarkingDeque* marking_deque) {
2026 NewSpace* space = heap->new_space();
2027 NewSpacePageIterator it(space->bottom(), space->top());
2028 while (it.has_next()) {
2029 NewSpacePage* page = it.next();
2030 DiscoverGreyObjectsOnPage(marking_deque, page);
2031 if (marking_deque->IsFull()) return;
2036 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2037 Object* o = *p;
2038 if (!o->IsHeapObject()) return false;
2039 HeapObject* heap_object = HeapObject::cast(o);
2040 MarkBit mark = Marking::MarkBitFrom(heap_object);
2045 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2048 DCHECK(o->IsHeapObject());
2049 HeapObject* heap_object = HeapObject::cast(o);
2050 MarkBit mark = Marking::MarkBitFrom(heap_object);
2055 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2056 StringTable* string_table = heap()->string_table();
2057 // Mark the string table itself.
2058 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2059 if (!string_table_mark.Get()) {
2060 // String table could have already been marked by visiting the handles list.
2061 SetMark(string_table, string_table_mark);
2063 // Explicitly mark the prefix.
2064 string_table->IteratePrefix(visitor);
2065 ProcessMarkingDeque();
2069 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2070 MarkBit mark_bit = Marking::MarkBitFrom(site);
2071 SetMark(site, mark_bit);
2075 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2076 // Mark the heap roots including global variables, stack variables,
2077 // etc., and all objects reachable from them.
2078 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2080 // Handle the string table specially.
2081 MarkStringTable(visitor);
2083 MarkWeakObjectToCodeTable();
2085 // There may be overflowed objects in the heap. Visit them now.
2086 while (marking_deque_.overflowed()) {
2087 RefillMarkingDeque();
2088 EmptyMarkingDeque();
2093 void MarkCompactCollector::MarkImplicitRefGroups() {
2094 List<ImplicitRefGroup*>* ref_groups =
2095 isolate()->global_handles()->implicit_ref_groups();
2098 for (int i = 0; i < ref_groups->length(); i++) {
2099 ImplicitRefGroup* entry = ref_groups->at(i);
2100 DCHECK(entry != NULL);
2102 if (!IsMarked(*entry->parent)) {
2103 (*ref_groups)[last++] = entry;
2107 Object*** children = entry->children;
2108 // A parent object is marked, so mark all child heap objects.
2109 for (size_t j = 0; j < entry->length; ++j) {
2110 if ((*children[j])->IsHeapObject()) {
2111 HeapObject* child = HeapObject::cast(*children[j]);
2112 MarkBit mark = Marking::MarkBitFrom(child);
2113 MarkObject(child, mark);
2117 // Once the entire group has been marked, dispose it because it's
2118 // not needed anymore.
2121 ref_groups->Rewind(last);
2125 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2126 HeapObject* weak_object_to_code_table =
2127 HeapObject::cast(heap()->weak_object_to_code_table());
2128 if (!IsMarked(weak_object_to_code_table)) {
2129 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2130 SetMark(weak_object_to_code_table, mark);
2135 // Mark all objects reachable from the objects on the marking stack.
2136 // Before: the marking stack contains zero or more heap object pointers.
2137 // After: the marking stack is empty, and all objects reachable from the
2138 // marking stack have been marked, or are overflowed in the heap.
2139 void MarkCompactCollector::EmptyMarkingDeque() {
2140 while (!marking_deque_.IsEmpty()) {
2141 HeapObject* object = marking_deque_.Pop();
2142 DCHECK(object->IsHeapObject());
2143 DCHECK(heap()->Contains(object));
2144 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2146 Map* map = object->map();
2147 MarkBit map_mark = Marking::MarkBitFrom(map);
2148 MarkObject(map, map_mark);
2150 MarkCompactMarkingVisitor::IterateBody(map, object);
2155 // Sweep the heap for overflowed objects, clear their overflow bits, and
2156 // push them on the marking stack. Stop early if the marking stack fills
2157 // before sweeping completes. If sweeping completes, there are no remaining
2158 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
2160 void MarkCompactCollector::RefillMarkingDeque() {
2161 DCHECK(marking_deque_.overflowed());
2163 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2164 if (marking_deque_.IsFull()) return;
2166 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2167 heap()->old_pointer_space());
2168 if (marking_deque_.IsFull()) return;
2170 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2171 if (marking_deque_.IsFull()) return;
2173 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2174 if (marking_deque_.IsFull()) return;
2176 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2177 if (marking_deque_.IsFull()) return;
2179 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2180 if (marking_deque_.IsFull()) return;
2182 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2183 heap()->property_cell_space());
2184 if (marking_deque_.IsFull()) return;
2186 LargeObjectIterator lo_it(heap()->lo_space());
2187 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
2188 if (marking_deque_.IsFull()) return;
2190 marking_deque_.ClearOverflowed();
2194 // Mark all objects reachable (transitively) from objects on the marking
2195 // stack. Before: the marking stack contains zero or more heap object
2196 // pointers. After: the marking stack is empty and there are no overflowed
2197 // objects in the heap.
2198 void MarkCompactCollector::ProcessMarkingDeque() {
2199 EmptyMarkingDeque();
2200 while (marking_deque_.overflowed()) {
2201 RefillMarkingDeque();
2202 EmptyMarkingDeque();
2207 // Mark all objects reachable (transitively) from objects on the marking
2208 // stack including references only considered in the atomic marking pause.
2209 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2210 bool work_to_do = true;
2211 DCHECK(marking_deque_.IsEmpty());
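// Iterate to a fixed point: each round may discover new objects via object
// groups, implicit reference groups, or weak collections, which in turn may
// make further entries in those structures reachable.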
2212 while (work_to_do) {
2213 isolate()->global_handles()->IterateObjectGroups(
2214 visitor, &IsUnmarkedHeapObjectWithHeap);
2215 MarkImplicitRefGroups();
2216 ProcessWeakCollections();
2217 work_to_do = !marking_deque_.IsEmpty();
2218 ProcessMarkingDeque();
2223 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2224 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2225 !it.done(); it.Advance()) {
2226 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2229 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2230 Code* code = it.frame()->LookupCode();
2231 if (!code->CanDeoptAt(it.frame()->pc())) {
2232 code->CodeIterateBody(visitor);
2234 ProcessMarkingDeque();
2241 void MarkCompactCollector::MarkLiveObjects() {
2242 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2243 double start_time = 0.0;
2244 if (FLAG_print_cumulative_gc_stat) {
2245 start_time = base::OS::TimeCurrentMillis();
2247 // The recursive GC marker detects when it is nearing stack overflow,
2248 // and switches to a different marking system. JS interrupts interfere
2249 // with the C stack limit check.
2250 PostponeInterruptsScope postpone(isolate());
2252 bool incremental_marking_overflowed = false;
2253 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2254 if (was_marked_incrementally_) {
2255 // Finalize the incremental marking and check whether we had an overflow.
2256 // Both markers use the grey color to mark overflowed objects, so the
2257 // non-incremental marker can deal with them as if the overflow had
2258 // occurred during normal marking.
2259 // But incremental marker uses a separate marking deque
2260 // so we have to explicitly copy its overflow state.
2261 incremental_marking->Finalize();
2262 incremental_marking_overflowed =
2263 incremental_marking->marking_deque()->overflowed();
2264 incremental_marking->marking_deque()->ClearOverflowed();
2266 // Abort any pending incremental activities e.g. incremental sweeping.
2267 incremental_marking->Abort();
2271 DCHECK(state_ == PREPARE_GC);
2272 state_ = MARK_LIVE_OBJECTS;
2274 // The to space contains live objects; a page in from space is used as a marking deque.
2276 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2277 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2278 if (FLAG_force_marking_deque_overflows) {
2279 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2281 marking_deque_.Initialize(marking_deque_start, marking_deque_end);
2282 DCHECK(!marking_deque_.overflowed());
2284 if (incremental_marking_overflowed) {
2285 // There are overflowed objects left in the heap after incremental marking.
2286 marking_deque_.SetOverflowed();
2289 PrepareForCodeFlushing();
2291 if (was_marked_incrementally_) {
2292 // There is no write barrier on cells so we have to scan them now at the end
2293 // of the incremental marking.
2295 HeapObjectIterator cell_iterator(heap()->cell_space());
2297 while ((cell = cell_iterator.Next()) != NULL) {
2298 DCHECK(cell->IsCell());
2299 if (IsMarked(cell)) {
2300 int offset = Cell::kValueOffset;
2301 MarkCompactMarkingVisitor::VisitPointer(
2302 heap(), reinterpret_cast<Object**>(cell->address() + offset));
2307 HeapObjectIterator js_global_property_cell_iterator(
2308 heap()->property_cell_space());
2310 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2311 DCHECK(cell->IsPropertyCell());
2312 if (IsMarked(cell)) {
2313 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2319 RootMarkingVisitor root_visitor(heap());
2320 MarkRoots(&root_visitor);
2322 ProcessTopOptimizedFrame(&root_visitor);
2324 // The objects reachable from the roots are marked, yet unreachable
2325 // objects are unmarked. Mark objects reachable due to host
2326 // application specific logic or through Harmony weak maps.
2327 ProcessEphemeralMarking(&root_visitor);
2329 // The objects reachable from the roots, weak maps or object groups
2330 // are marked, yet unreachable objects are unmarked. Mark objects
2331 // reachable only from weak global handles.
2333 // First we identify nonlive weak handles and mark them as pending destruction.
2335 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2336 &IsUnmarkedHeapObject);
2337 // Then we mark the objects and process the transitive closure.
2338 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2339 while (marking_deque_.overflowed()) {
2340 RefillMarkingDeque();
2341 EmptyMarkingDeque();
2344 // Repeat host application specific and Harmony weak maps marking to
2345 // mark unmarked objects reachable from the weak roots.
2346 ProcessEphemeralMarking(&root_visitor);
2350 if (FLAG_print_cumulative_gc_stat) {
2351 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2356 void MarkCompactCollector::AfterMarking() {
2357 // Object literal map caches reference strings (cache keys) and maps
2358 // (cache values). At this point still useful maps have already been
2359 // marked. Mark the keys for the alive values before we process the string table.
2363 // Prune the string table removing all strings only pointed to by the
2364 // string table. Cannot use string_table() here because the string table is marked.
2366 StringTable* string_table = heap()->string_table();
2367 InternalizedStringTableCleaner internalized_visitor(heap());
2368 string_table->IterateElements(&internalized_visitor);
2369 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2371 ExternalStringTableCleaner external_visitor(heap());
2372 heap()->external_string_table_.Iterate(&external_visitor);
2373 heap()->external_string_table_.CleanUp();
2375 // Process the weak references.
2376 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2377 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2379 // Remove object groups after marking phase.
2380 heap()->isolate()->global_handles()->RemoveObjectGroups();
2381 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2383 // Flush code from collected candidates.
2384 if (is_code_flushing_enabled()) {
2385 code_flusher_->ProcessCandidates();
2386 // If incremental marker does not support code flushing, we need to
2387 // disable it before incremental marking steps for next cycle.
2388 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2389 EnableCodeFlushing(false);
2393 if (FLAG_track_gc_object_stats) {
2394 heap()->CheckpointObjectStats();
2399 void MarkCompactCollector::ProcessMapCaches() {
2400 Object* raw_context = heap()->native_contexts_list();
2401 while (raw_context != heap()->undefined_value()) {
2402 Context* context = reinterpret_cast<Context*>(raw_context);
2403 if (IsMarked(context)) {
2404 HeapObject* raw_map_cache =
2405 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2406 // A map cache may be reachable from the stack. In this case
2407 // it's already transitively marked and it's too late to clean it up.
2409 if (!IsMarked(raw_map_cache) &&
2410 raw_map_cache != heap()->undefined_value()) {
2411 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2412 int existing_elements = map_cache->NumberOfElements();
2413 int used_elements = 0;
2414 for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
2415 i += MapCache::kEntrySize) {
2416 Object* raw_key = map_cache->get(i);
2417 if (raw_key == heap()->undefined_value() ||
2418 raw_key == heap()->the_hole_value())
2420 STATIC_ASSERT(MapCache::kEntrySize == 2);
2421 Object* raw_map = map_cache->get(i + 1);
2422 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2425 // Delete useless entries with unmarked maps.
2426 DCHECK(raw_map->IsMap());
2427 map_cache->set_the_hole(i);
2428 map_cache->set_the_hole(i + 1);
2431 if (used_elements == 0) {
2432 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2434 // Note: we don't actually shrink the cache here to avoid
2435 // extra complexity during GC. We rely on subsequent cache
2436 // usages (EnsureCapacity) to do this.
2437 map_cache->ElementsRemoved(existing_elements - used_elements);
2438 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2439 MarkObject(map_cache, map_cache_markbit);
2443 // Move to next element in the list.
2444 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2446 ProcessMarkingDeque();
2450 void MarkCompactCollector::ClearNonLiveReferences() {
2451 // Iterate over the map space, setting map transitions that go from
2452 // a marked map to an unmarked map to null transitions. This action
2453 // is carried out only on maps of JSObjects and related subtypes.
2454 HeapObjectIterator map_iterator(heap()->map_space());
2455 for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2456 obj = map_iterator.Next()) {
2457 Map* map = Map::cast(obj);
2459 if (!map->CanTransition()) continue;
2461 MarkBit map_mark = Marking::MarkBitFrom(map);
2462 ClearNonLivePrototypeTransitions(map);
2463 ClearNonLiveMapTransitions(map, map_mark);
2465 if (map_mark.Get()) {
2466 ClearNonLiveDependentCode(map->dependent_code());
2468 ClearDependentCode(map->dependent_code());
2469 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2473 // Iterate over property cell space, removing dependent code that is not
2474 // otherwise kept alive by strong references.
2475 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2476 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
2477 cell = cell_iterator.Next()) {
2478 if (IsMarked(cell)) {
2479 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2483 // Iterate over allocation sites, removing dependent code that is not
2484 // otherwise kept alive by strong references.
2485 Object* undefined = heap()->undefined_value();
2486 for (Object* site = heap()->allocation_sites_list(); site != undefined;
2487 site = AllocationSite::cast(site)->weak_next()) {
2488 if (IsMarked(site)) {
2489 ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2493 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2494 WeakHashTable* table =
2495 WeakHashTable::cast(heap_->weak_object_to_code_table());
2496 uint32_t capacity = table->Capacity();
2497 for (uint32_t i = 0; i < capacity; i++) {
2498 uint32_t key_index = table->EntryToIndex(i);
2499 Object* key = table->get(key_index);
2500 if (!table->IsKey(key)) continue;
2501 uint32_t value_index = table->EntryToValueIndex(i);
2502 Object* value = table->get(value_index);
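// If the key is an unmarked Cell whose value is still live, the cell is
// resurrected below and its value slot recorded, so that the table entry is
// kept for as long as the object the cell points to remains alive.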
2503 if (key->IsCell() && !IsMarked(key)) {
2504 Cell* cell = Cell::cast(key);
2505 Object* object = cell->value();
2506 if (IsMarked(object)) {
2507 MarkBit mark = Marking::MarkBitFrom(cell);
2508 SetMark(cell, mark);
2509 Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2510 RecordSlot(value_slot, value_slot, *value_slot);
2513 if (IsMarked(key)) {
2514 if (!IsMarked(value)) {
2515 HeapObject* obj = HeapObject::cast(value);
2516 MarkBit mark = Marking::MarkBitFrom(obj);
2519 ClearNonLiveDependentCode(DependentCode::cast(value));
2521 ClearDependentCode(DependentCode::cast(value));
2522 table->set(key_index, heap_->the_hole_value());
2523 table->set(value_index, heap_->the_hole_value());
2524 table->ElementRemoved();
2531 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2532 int number_of_transitions = map->NumberOfProtoTransitions();
2533 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2535 int new_number_of_transitions = 0;
2536 const int header = Map::kProtoTransitionHeaderSize;
2537 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2538 const int map_offset = header + Map::kProtoTransitionMapOffset;
2539 const int step = Map::kProtoTransitionElementsPerEntry;
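// Compact surviving (prototype, cached map) pairs towards the front of the
// prototype transitions array, recording slots for the retained prototypes;
// entries whose prototype or cached map is dead are dropped.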
2540 for (int i = 0; i < number_of_transitions; i++) {
2541 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2542 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2543 if (IsMarked(prototype) && IsMarked(cached_map)) {
2544 DCHECK(!prototype->IsUndefined());
2545 int proto_index = proto_offset + new_number_of_transitions * step;
2546 int map_index = map_offset + new_number_of_transitions * step;
2547 if (new_number_of_transitions != i) {
2548 prototype_transitions->set(proto_index, prototype,
2549 UPDATE_WRITE_BARRIER);
2550 prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
2552 Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2553 RecordSlot(slot, slot, prototype);
2554 new_number_of_transitions++;
2558 if (new_number_of_transitions != number_of_transitions) {
2559 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2562 // Fill slots that became free with undefined value.
2563 for (int i = new_number_of_transitions * step;
2564 i < number_of_transitions * step; i++) {
2565 prototype_transitions->set_undefined(header + i);
2570 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2572 Object* potential_parent = map->GetBackPointer();
2573 if (!potential_parent->IsMap()) return;
2574 Map* parent = Map::cast(potential_parent);
2576 // Follow the back pointer to check whether we are dealing with a map transition
2577 // from a live map to a dead path; if so, clear the transitions of the parent.
2578 bool current_is_alive = map_mark.Get();
2579 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2580 if (!current_is_alive && parent_is_alive) {
2581 ClearMapTransitions(parent);
2586 // Clear a possible back pointer in case the transition leads to a dead map.
2587 // Returns true if a back pointer was cleared and false otherwise.
2588 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2589 if (Marking::MarkBitFrom(target).Get()) return false;
2590 target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2595 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2596 // If there are no transitions to be cleared, return.
2597 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2598 // properly cleared.
2599 if (!map->HasTransitionArray()) return;
2601 TransitionArray* t = map->transitions();
2603 int transition_index = 0;
2605 DescriptorArray* descriptors = map->instance_descriptors();
2606 bool descriptors_owner_died = false;
2608 // Compact all live transitions to the left.
2609 for (int i = 0; i < t->number_of_transitions(); ++i) {
2610 Map* target = t->GetTarget(i);
2611 if (ClearMapBackPointer(target)) {
2612 if (target->instance_descriptors() == descriptors) {
2613 descriptors_owner_died = true;
2616 if (i != transition_index) {
2617 Name* key = t->GetKey(i);
2618 t->SetKey(transition_index, key);
2619 Object** key_slot = t->GetKeySlot(transition_index);
2620 RecordSlot(key_slot, key_slot, key);
2621 // Target slots do not need to be recorded since maps are not compacted.
2622 t->SetTarget(transition_index, t->GetTarget(i));
2628 // If there are no transitions to be cleared, return.
2629 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2630 // properly cleared.
2631 if (transition_index == t->number_of_transitions()) return;
2633 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2635 if (descriptors_owner_died) {
2636 if (number_of_own_descriptors > 0) {
2637 TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2638 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2639 map->set_owns_descriptors(true);
2641 DCHECK(descriptors == heap_->empty_descriptor_array());
2645 // Note that we never eliminate a transition array, though we might right-trim
2646 // such that number_of_transitions() == 0. If this assumption changes,
2647 // TransitionArray::CopyInsert() will need to deal with the case that a
2648 // transition array disappeared during GC.
2649 int trim = t->number_of_transitions() - transition_index;
2651 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2652 t, t->IsSimpleTransition() ? trim
2653 : trim * TransitionArray::kTransitionSize);
2655 DCHECK(map->HasTransitionArray());
2659 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2660 DescriptorArray* descriptors,
2661 int number_of_own_descriptors) {
2662 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2663 int to_trim = number_of_descriptors - number_of_own_descriptors;
2664 if (to_trim == 0) return;
2666 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2667 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2668 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2670 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2671 descriptors->Sort();
2675 void MarkCompactCollector::TrimEnumCache(Map* map,
2676 DescriptorArray* descriptors) {
2677 int live_enum = map->EnumLength();
2678 if (live_enum == kInvalidEnumCacheSentinel) {
2679 live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2681 if (live_enum == 0) return descriptors->ClearEnumCache();
2683 FixedArray* enum_cache = descriptors->GetEnumCache();
2685 int to_trim = enum_cache->length() - live_enum;
2686 if (to_trim <= 0) return;
2687 heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
2690 if (!descriptors->HasEnumIndicesCache()) return;
2691 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2692 heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
2696 void MarkCompactCollector::ClearDependentICList(Object* head) {
2697 Object* current = head;
2698 Object* undefined = heap()->undefined_value();
2699 while (current != undefined) {
2700 Code* code = Code::cast(current);
2701 if (IsMarked(code)) {
2702 DCHECK(code->is_weak_stub());
2703 IC::InvalidateMaps(code);
2705 current = code->next_code_link();
2706 code->set_next_code_link(undefined);
2711 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
2712 DisallowHeapAllocation no_allocation;
2713 DependentCode::GroupStartIndexes starts(entries);
2714 int number_of_entries = starts.number_of_entries();
2715 if (number_of_entries == 0) return;
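// The weak IC group holds a single linked-list head of weak IC stubs, which is
// walked and unlinked in ClearDependentICList(); the weak code group holds
// individual code objects, which are marked for deoptimization below.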
2716 int g = DependentCode::kWeakICGroup;
2717 if (starts.at(g) != starts.at(g + 1)) {
2718 int i = starts.at(g);
2719 DCHECK(i + 1 == starts.at(g + 1));
2720 Object* head = entries->object_at(i);
2721 ClearDependentICList(head);
2723 g = DependentCode::kWeakCodeGroup;
2724 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2725 // If the entry is compilation info then the map must be alive,
2726 // and ClearDependentCode shouldn't be called.
2727 DCHECK(entries->is_code_at(i));
2728 Code* code = entries->code_at(i);
2729 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2730 code->set_marked_for_deoptimization(true);
2731 code->InvalidateEmbeddedObjects();
2732 have_code_to_deoptimize_ = true;
2735 for (int i = 0; i < number_of_entries; i++) {
2736 entries->clear_at(i);
2741 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2742 DependentCode* entries, int group, int start, int end, int new_start) {
2744 if (group == DependentCode::kWeakICGroup) {
2745 // Dependent weak IC stubs form a linked list and only the head is stored
2746 // in the dependent code array.
2748 DCHECK(start + 1 == end);
2749 Object* old_head = entries->object_at(start);
2750 MarkCompactWeakObjectRetainer retainer;
2751 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2752 entries->set_object_at(new_start, head);
2753 Object** slot = entries->slot_at(new_start);
2754 RecordSlot(slot, slot, head);
2755 // We do not compact this group even if the head is undefined, as more
2756 // dependent ICs are likely to be added later.
2760 for (int i = start; i < end; i++) {
2761 Object* obj = entries->object_at(i);
2762 DCHECK(obj->IsCode() || IsMarked(obj));
2763 if (IsMarked(obj) &&
2764 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2765 if (new_start + survived != i) {
2766 entries->set_object_at(new_start + survived, obj);
2768 Object** slot = entries->slot_at(new_start + survived);
2769 RecordSlot(slot, slot, obj);
2774 entries->set_number_of_entries(
2775 static_cast<DependentCode::DependencyGroup>(group), survived);
2780 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2781 DisallowHeapAllocation no_allocation;
2782 DependentCode::GroupStartIndexes starts(entries);
2783 int number_of_entries = starts.number_of_entries();
2784 if (number_of_entries == 0) return;
2785 int new_number_of_entries = 0;
2786 // Go through all groups, remove dead code objects, and compact.
2787 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2788 int survived = ClearNonLiveDependentCodeInGroup(
2789 entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2790 new_number_of_entries += survived;
2792 for (int i = new_number_of_entries; i < number_of_entries; i++) {
2793 entries->clear_at(i);
2798 void MarkCompactCollector::ProcessWeakCollections() {
2799 GCTracer::Scope gc_scope(heap()->tracer(),
2800 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2801 Object* weak_collection_obj = heap()->encountered_weak_collections();
2802 while (weak_collection_obj != Smi::FromInt(0)) {
2803 JSWeakCollection* weak_collection =
2804 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2805 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2806 if (weak_collection->table()->IsHashTable()) {
2807 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2808 Object** anchor = reinterpret_cast<Object**>(table->address());
2809 for (int i = 0; i < table->Capacity(); i++) {
2810 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2812 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2813 RecordSlot(anchor, key_slot, *key_slot);
2814 Object** value_slot =
2815 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2816 MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2821 weak_collection_obj = weak_collection->next();
2826 void MarkCompactCollector::ClearWeakCollections() {
2827 GCTracer::Scope gc_scope(heap()->tracer(),
2828 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2829 Object* weak_collection_obj = heap()->encountered_weak_collections();
2830 while (weak_collection_obj != Smi::FromInt(0)) {
2831 JSWeakCollection* weak_collection =
2832 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2833 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2834 if (weak_collection->table()->IsHashTable()) {
2835 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2836 for (int i = 0; i < table->Capacity(); i++) {
2837 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2838 if (!MarkCompactCollector::IsMarked(key)) {
2839 table->RemoveEntry(i);
2843 weak_collection_obj = weak_collection->next();
2844 weak_collection->set_next(heap()->undefined_value());
2846 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2850 void MarkCompactCollector::AbortWeakCollections() {
2851 GCTracer::Scope gc_scope(heap()->tracer(),
2852 GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
2853 Object* weak_collection_obj = heap()->encountered_weak_collections();
2854 while (weak_collection_obj != Smi::FromInt(0)) {
2855 JSWeakCollection* weak_collection =
2856 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2857 weak_collection_obj = weak_collection->next();
2858 weak_collection->set_next(heap()->undefined_value());
2860 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2864 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
2865 if (heap_->InNewSpace(value)) {
2866 heap_->store_buffer()->Mark(slot);
2867 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2868 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2869 reinterpret_cast<Object**>(slot),
2870 SlotsBuffer::IGNORE_OVERFLOW);
2875 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2878 // The first pass migrates all live objects from one semispace to another or
2879 // promotes them to old space. The forwarding address is written directly into
2880 // the first word of the object without any encoding. If the object is dead, we
2881 // write NULL as the forwarding address.
2883 // The second pass updates pointers to new space in all spaces. It is possible
2884 // to encounter pointers to dead new space objects during traversal of pointers
2885 // to new space. We should clear them to avoid encountering them during next
2886 // pointer iteration. This is an issue if the store buffer overflows and we
2887 // have to scan the entire old space, including dead objects, looking for
2888 // pointers to new space.
2889 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2890 int size, AllocationSpace dest) {
2891 Address dst_addr = dst->address();
2892 Address src_addr = src->address();
2893 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2894 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2895 if (dest == OLD_POINTER_SPACE) {
2896 Address src_slot = src_addr;
2897 Address dst_slot = dst_addr;
2898 DCHECK(IsAligned(size, kPointerSize));
2900 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2901 Object* value = Memory::Object_at(src_slot);
2903 Memory::Object_at(dst_slot) = value;
2905 // We special case ConstantPoolArrays below since they could contain
2906 // integer value entries which look like tagged pointers.
2907 // TODO(mstarzinger): restructure this code to avoid this special-casing.
2908 if (!src->IsConstantPoolArray()) {
2909 RecordMigratedSlot(value, dst_slot);
2912 src_slot += kPointerSize;
2913 dst_slot += kPointerSize;
2916 if (compacting_ && dst->IsJSFunction()) {
2917 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2918 Address code_entry = Memory::Address_at(code_entry_slot);
2920 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2921 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2922 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2923 SlotsBuffer::IGNORE_OVERFLOW);
2925 } else if (dst->IsConstantPoolArray()) {
2926 ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2927 ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2928 while (!code_iter.is_finished()) {
2929 Address code_entry_slot =
2930 dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2931 Address code_entry = Memory::Address_at(code_entry_slot);
2933 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2934 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2935 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2936 SlotsBuffer::IGNORE_OVERFLOW);
2939 ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2940 while (!heap_iter.is_finished()) {
2942 dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2943 Object* value = Memory::Object_at(heap_slot);
2944 RecordMigratedSlot(value, heap_slot);
2947 } else if (dest == CODE_SPACE) {
2948 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2949 heap()->MoveBlock(dst_addr, src_addr, size);
2950 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2951 SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
2952 SlotsBuffer::IGNORE_OVERFLOW);
2953 Code::cast(dst)->Relocate(dst_addr - src_addr);
2955 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2956 heap()->MoveBlock(dst_addr, src_addr, size);
2958 heap()->OnMoveEvent(dst, src, size);
2959 Memory::Address_at(src_addr) = dst_addr;
2963 // Visitor for updating pointers from live objects in old spaces to new space.
2964 // It does not expect to encounter pointers to dead objects.
2965 class PointersUpdatingVisitor : public ObjectVisitor {
2967 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2969 void VisitPointer(Object** p) { UpdatePointer(p); }
2971 void VisitPointers(Object** start, Object** end) {
2972 for (Object** p = start; p < end; p++) UpdatePointer(p);
2975 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2976 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2977 Object* target = rinfo->target_object();
2978 Object* old_target = target;
2979 VisitPointer(&target);
2980 // Avoid unnecessary changes that might flush the instruction cache.
2982 if (target != old_target) {
2983 rinfo->set_target_object(target);
2987 void VisitCodeTarget(RelocInfo* rinfo) {
2988 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2989 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2990 Object* old_target = target;
2991 VisitPointer(&target);
2992 if (target != old_target) {
2993 rinfo->set_target_address(Code::cast(target)->instruction_start());
2997 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2998 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2999 Object* stub = rinfo->code_age_stub();
3000 DCHECK(stub != NULL);
3001 VisitPointer(&stub);
3002 if (stub != rinfo->code_age_stub()) {
3003 rinfo->set_code_age_stub(Code::cast(stub));
3007 void VisitDebugTarget(RelocInfo* rinfo) {
3008 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
3009 rinfo->IsPatchedReturnSequence()) ||
3010 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
3011 rinfo->IsPatchedDebugBreakSlotSequence()));
3012 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
3013 VisitPointer(&target);
3014 rinfo->set_call_address(Code::cast(target)->instruction_start());
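// Evacuated and promoted objects leave a forwarding address in their map word;
// UpdateSlot() below follows it so that the visited slot ends up pointing at
// the object's new location.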
3017 static inline void UpdateSlot(Heap* heap, Object** slot) {
3018 Object* obj = *slot;
3020 if (!obj->IsHeapObject()) return;
3022 HeapObject* heap_obj = HeapObject::cast(obj);
3024 MapWord map_word = heap_obj->map_word();
3025 if (map_word.IsForwardingAddress()) {
3026 DCHECK(heap->InFromSpace(heap_obj) ||
3027 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
3028 HeapObject* target = map_word.ToForwardingAddress();
3029 *slot = target;
3030 DCHECK(!heap->InFromSpace(target) &&
3031 !MarkCompactCollector::IsOnEvacuationCandidate(target));
3036 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
3042 static void UpdatePointer(HeapObject** address, HeapObject* object) {
3043 Address new_addr = Memory::Address_at(object->address());
3045 // The new space sweep will overwrite the map word of dead objects
3046 // with NULL. In this case we do not need to transfer this entry to
3047 // the store buffer which we are rebuilding.
3048 // We perform the pointer update with a no barrier compare-and-swap. The
3049 // compare and swap may fail in the case where the pointer update tries to
3050 // update garbage memory which was concurrently accessed by the sweeper.
3051 if (new_addr != NULL) {
3052 base::NoBarrier_CompareAndSwap(
3053 reinterpret_cast<base::AtomicWord*>(address),
3054 reinterpret_cast<base::AtomicWord>(object),
3055 reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
3060 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3062 MapWord map_word = HeapObject::cast(*p)->map_word();
3064 if (map_word.IsForwardingAddress()) {
3065 return String::cast(map_word.ToForwardingAddress());
3068 return String::cast(*p);
3072 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3074 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3076 OldSpace* target_space = heap()->TargetSpace(object);
3078 DCHECK(target_space == heap()->old_pointer_space() ||
3079 target_space == heap()->old_data_space());
3081 AllocationResult allocation = target_space->AllocateRaw(object_size);
3082 if (allocation.To(&target)) {
3083 MigrateObject(target, object, object_size, target_space->identity());
3084 heap()->IncrementPromotedObjectsSize(object_size);
3092 void MarkCompactCollector::EvacuateNewSpace() {
3093 // There are soft limits in the allocation code, designed to trigger a
3094 // mark-sweep collection by failing allocations. But since we are already in
3095 // a mark-sweep allocation, there is no sense in trying to trigger one.
3096 AlwaysAllocateScope scope(isolate());
3098 NewSpace* new_space = heap()->new_space();
3100 // Store allocation range before flipping semispaces.
3101 Address from_bottom = new_space->bottom();
3102 Address from_top = new_space->top();
3104 // Flip the semispaces. After flipping, to space is empty and from space has live objects.
3107 new_space->ResetAllocationInfo();
3109 int survivors_size = 0;
3111 // First pass: traverse all objects in inactive semispace, remove marks,
3112 // migrate live objects and write forwarding addresses. This stage puts
3113 // new entries in the store buffer and may cause some pages to be marked
3114 // scan-on-scavenge.
3115 NewSpacePageIterator it(from_bottom, from_top);
3116 while (it.has_next()) {
3117 NewSpacePage* p = it.next();
3118 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3121 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3122 new_space->set_age_mark(new_space->top());
3126 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3127 AlwaysAllocateScope always_allocate(isolate());
3128 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3129 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3130 p->MarkSweptPrecisely();
3134 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3135 Address cell_base = it.CurrentCellBase();
3136 MarkBit::CellType* cell = it.CurrentCell();
3138 if (*cell == 0) continue;
3140 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3141 for (int i = 0; i < live_objects; i++) {
3142 Address object_addr = cell_base + offsets[i] * kPointerSize;
3143 HeapObject* object = HeapObject::FromAddress(object_addr);
3144 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3146 int size = object->Size();
3148 HeapObject* target_object;
3149 AllocationResult allocation = space->AllocateRaw(size);
3150 if (!allocation.To(&target_object)) {
3151 // If allocation failed, use emergency memory and re-try allocation.
3152 CHECK(space->HasEmergencyMemory());
3153 space->UseEmergencyMemory();
3154 allocation = space->AllocateRaw(size);
3156 if (!allocation.To(&target_object)) {
3157 // OS refused to give us memory.
3158 V8::FatalProcessOutOfMemory("Evacuation");
3162 MigrateObject(target_object, object, size, space->identity());
3163 DCHECK(object->map_word().IsForwardingAddress());
3166 // Clear marking bits for current cell.
3169 p->ResetLiveBytes();
3173 void MarkCompactCollector::EvacuatePages() {
3174 int npages = evacuation_candidates_.length();
3175 for (int i = 0; i < npages; i++) {
3176 Page* p = evacuation_candidates_[i];
3177 DCHECK(p->IsEvacuationCandidate() ||
3178 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3179 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3180 MemoryChunk::SWEEPING_DONE);
3181 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3182 // Allocate emergency memory for the case when compaction fails due to out of memory.
3184 if (!space->HasEmergencyMemory()) {
3185 space->CreateEmergencyMemory();
3187 if (p->IsEvacuationCandidate()) {
3188 // During compaction we might have to request a new page. Check that we
3189 // have an emergency page and the space still has room for that.
3190 if (space->HasEmergencyMemory() && space->CanExpand()) {
3191 EvacuateLiveObjectsFromPage(p);
3193 // Without room for expansion evacuation is not guaranteed to succeed.
3194 // Pessimistically abandon unevacuated pages.
3195 for (int j = i; j < npages; j++) {
3196 Page* page = evacuation_candidates_[j];
3197 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3198 page->ClearEvacuationCandidate();
3199 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3206 // Release emergency memory.
3207 PagedSpaces spaces(heap());
3208 for (PagedSpace* space = spaces.next(); space != NULL;
3209 space = spaces.next()) {
3210 if (space->HasEmergencyMemory()) {
3211 space->FreeEmergencyMemory();
3218 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3220 virtual Object* RetainAs(Object* object) {
3221 if (object->IsHeapObject()) {
3222 HeapObject* heap_object = HeapObject::cast(object);
3223 MapWord map_word = heap_object->map_word();
3224 if (map_word.IsForwardingAddress()) {
3225 return map_word.ToForwardingAddress();
3233 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3234 SlotsBuffer::SlotType slot_type, Address addr) {
3235 switch (slot_type) {
3236 case SlotsBuffer::CODE_TARGET_SLOT: {
3237 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3238 rinfo.Visit(isolate, v);
3241 case SlotsBuffer::CODE_ENTRY_SLOT: {
3242 v->VisitCodeEntry(addr);
3245 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3246 HeapObject* obj = HeapObject::FromAddress(addr);
3247 Code::cast(obj)->CodeIterateBody(v);
3250 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3251 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3252 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3255 case SlotsBuffer::JS_RETURN_SLOT: {
3256 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3257 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3260 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3261 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3262 rinfo.Visit(isolate, v);
3272 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3275 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3278 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3281 template <MarkCompactCollector::SweepingParallelism mode>
3282 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3284 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3285 DCHECK(free_list == NULL);
3286 return space->Free(start, size);
3288 // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3289 return size - free_list->Free(start, size);
3294 // Sweep a space precisely. After this has been done the space can
3295 // be iterated precisely, hitting only the live objects. Code space
3296 // is always swept precisely because we want to be able to iterate
3297 // over it. Map space is swept precisely, because it is not compacted.
3298 // Slots in live objects pointing into evacuation candidates are updated if requested.
3300 // Returns the size of the largest contiguous freed memory chunk in bytes.
3301 template <SweepingMode sweeping_mode,
3302 MarkCompactCollector::SweepingParallelism parallelism,
3303 SkipListRebuildingMode skip_list_mode,
3304 FreeSpaceTreatmentMode free_space_mode>
3305 static int SweepPrecisely(PagedSpace* space, FreeList* free_list, Page* p,
3307 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3308 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3309 space->identity() == CODE_SPACE);
3310 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3311 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3312 sweeping_mode == SWEEP_ONLY);
3314 Address free_start = p->area_start();
3315 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3318 SkipList* skip_list = p->skip_list();
3319 int curr_region = -1;
3320 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3324 intptr_t freed_bytes = 0;
3325 intptr_t max_freed_bytes = 0;
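// Walk the mark bitmap cell by cell: each set bit marks the start of a live
// (black) object, and the gap between the end of the previous live object and
// the next live object start is released to the free list (or zapped first
// when ZAP_FREE_SPACE is requested).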
3327 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3328 Address cell_base = it.CurrentCellBase();
3329 MarkBit::CellType* cell = it.CurrentCell();
3330 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3332 for (; live_objects != 0; live_objects--) {
3333 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3334 if (free_end != free_start) {
3335 int size = static_cast<int>(free_end - free_start);
3336 if (free_space_mode == ZAP_FREE_SPACE) {
3337 memset(free_start, 0xcc, size);
3339 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3340 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3341 #ifdef ENABLE_GDB_JIT_INTERFACE
3342 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3343 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3347 HeapObject* live_object = HeapObject::FromAddress(free_end);
3348 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3349 Map* map = live_object->map();
3350 int size = live_object->SizeFromMap(map);
3351 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3352 live_object->IterateBody(map->instance_type(), size, v);
3354 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3355 int new_region_start = SkipList::RegionNumber(free_end);
3356 int new_region_end =
3357 SkipList::RegionNumber(free_end + size - kPointerSize);
3358 if (new_region_start != curr_region || new_region_end != curr_region) {
3359 skip_list->AddObject(free_end, size);
3360 curr_region = new_region_end;
3363 free_start = free_end + size;
3365 // Clear marking bits for current cell.
3368 if (free_start != p->area_end()) {
3369 int size = static_cast<int>(p->area_end() - free_start);
3370 if (free_space_mode == ZAP_FREE_SPACE) {
3371 memset(free_start, 0xcc, size);
3373 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3374 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3375 #ifdef ENABLE_GDB_JIT_INTERFACE
3376 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3377 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3381 p->ResetLiveBytes();
3383 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3384 // When concurrent sweeping is active, the page will be marked after
3385 // sweeping by the main thread.
3386 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3388 p->MarkSweptPrecisely();
3390 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3394 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3395 Page* p = Page::FromAddress(code->address());
3397 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3401 Address code_start = code->address();
3402 Address code_end = code_start + code->Size();
3404 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3405 uint32_t end_index =
3406 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3408 Bitmap* b = p->markbits();
3410 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3411 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3413 MarkBit::CellType* start_cell = start_mark_bit.cell();
3414 MarkBit::CellType* end_cell = end_mark_bit.cell();
3417 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3418 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
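// start_mask has all bits at and above the start bit set, end_mask has all
// bits at and below the end bit set, so their intersection covers exactly
// [start_index, end_index] within a single cell. Illustration with 8-bit
// cells: for start bit 2 and end bit 5, start_mask = 11111100 and
// end_mask = 00111111, giving 00111100.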
3420 if (start_cell == end_cell) {
3421 *start_cell |= start_mask & end_mask;
3423 *start_cell |= start_mask;
3424 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3427 *end_cell |= end_mask;
3430 for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
3439 static bool IsOnInvalidatedCodeObject(Address addr) {
3440 // We did not record any slots in large objects thus
3441 // we can safely go to the page from the slot address.
3442 Page* p = Page::FromAddress(addr);
3444 // First check owner's identity because old pointer and old data spaces
3445 // are swept lazily and might still have non-zero mark-bits on some pages.
3447 if (p->owner()->identity() != CODE_SPACE) return false;
3449 // In code space, only bits on evacuation candidates (where we don't record
3450 // any slots) and under invalidated code objects are non-zero.
3452 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3454 return mark_bit.Get();
3458 void MarkCompactCollector::InvalidateCode(Code* code) {
3459 if (heap_->incremental_marking()->IsCompacting() &&
3460 !ShouldSkipEvacuationSlotRecording(code)) {
3461 DCHECK(compacting_);
3463 // If the object is white then no slots were recorded on it yet.
3464 MarkBit mark_bit = Marking::MarkBitFrom(code);
3465 if (Marking::IsWhite(mark_bit)) return;
3467 invalidated_code_.Add(code);
3472 // Return true if the given code is deoptimized or will be deoptimized.
3473 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3474 return code->is_optimized_code() && code->marked_for_deoptimization();
3478 bool MarkCompactCollector::MarkInvalidatedCode() {
3479 bool code_marked = false;
3481 int length = invalidated_code_.length();
3482 for (int i = 0; i < length; i++) {
3483 Code* code = invalidated_code_[i];
3485 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3494 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3495 int length = invalidated_code_.length();
3496 for (int i = 0; i < length; i++) {
3497 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3502 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3503 int length = invalidated_code_.length();
3504 for (int i = 0; i < length; i++) {
3505 Code* code = invalidated_code_[i];
3507 code->Iterate(visitor);
3508 SetMarkBitsUnderInvalidatedCode(code, false);
3511 invalidated_code_.Rewind(0);
3515 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3516 Heap::RelocationLock relocation_lock(heap());
3518 bool code_slots_filtering_required;
3520 GCTracer::Scope gc_scope(heap()->tracer(),
3521 GCTracer::Scope::MC_SWEEP_NEWSPACE);
3522 code_slots_filtering_required = MarkInvalidatedCode();
3527 GCTracer::Scope gc_scope(heap()->tracer(),
3528 GCTracer::Scope::MC_EVACUATE_PAGES);
3532 // Second pass: find pointers to new space and update them.
3533 PointersUpdatingVisitor updating_visitor(heap());
3536 GCTracer::Scope gc_scope(heap()->tracer(),
3537 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3538 // Update pointers in to space.
3539 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3540 heap()->new_space()->top());
3541 for (HeapObject* object = to_it.Next(); object != NULL;
3542 object = to_it.Next()) {
3543 Map* map = object->map();
3544 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3550 GCTracer::Scope gc_scope(heap()->tracer(),
3551 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3553 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3557 GCTracer::Scope gc_scope(heap()->tracer(),
3558 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3559 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3560 &Heap::ScavengeStoreBufferCallback);
3561 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3566 GCTracer::Scope gc_scope(heap()->tracer(),
3567 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3568 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
3569 code_slots_filtering_required);
3570 if (FLAG_trace_fragmentation) {
3571 PrintF(" migration slots buffer: %d\n",
3572 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3575 if (compacting_ && was_marked_incrementally_) {
3576 // It's difficult to filter out slots recorded for large objects.
3577 LargeObjectIterator it(heap_->lo_space());
3578 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3579 // LargeObjectSpace is not swept yet, so we have to skip
3580 // dead objects explicitly.
3581 if (!IsMarked(obj)) continue;
3583 Page* p = Page::FromAddress(obj->address());
3584 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3585 obj->Iterate(&updating_visitor);
3586 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3592 int npages = evacuation_candidates_.length();
3594 GCTracer::Scope gc_scope(
3596 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3597 for (int i = 0; i < npages; i++) {
3598 Page* p = evacuation_candidates_[i];
3599 DCHECK(p->IsEvacuationCandidate() ||
3600 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3602 if (p->IsEvacuationCandidate()) {
3603 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
3604 code_slots_filtering_required);
3605 if (FLAG_trace_fragmentation) {
3606 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3607 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3610 // Important: the skip list should be cleared only after the roots have been
3611 // updated, because root iteration traverses the stack and might have to find
3612 // code objects from a not-yet-updated pc pointing into an evacuation candidate.
3613 SkipList* list = p->skip_list();
3614 if (list != NULL) list->Clear();
3616 if (FLAG_gc_verbose) {
3617 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3618 reinterpret_cast<intptr_t>(p));
3620 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3621 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3623 switch (space->identity()) {
3624 case OLD_DATA_SPACE:
3625 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
3627 case OLD_POINTER_SPACE:
3628 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3629 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
3630 space, NULL, p, &updating_visitor);
3633 if (FLAG_zap_code_space) {
3634 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3635 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
3636 space, NULL, p, &updating_visitor);
3638 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3639 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
3640 space, NULL, p, &updating_visitor);
3651 GCTracer::Scope gc_scope(heap()->tracer(),
3652 GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3654 // Update pointers from cells.
3655 HeapObjectIterator cell_iterator(heap_->cell_space());
3656 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3657 cell = cell_iterator.Next()) {
3658 if (cell->IsCell()) {
3659 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3663 HeapObjectIterator js_global_property_cell_iterator(
3664 heap_->property_cell_space());
3665 for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3666 cell = js_global_property_cell_iterator.Next()) {
3667 if (cell->IsPropertyCell()) {
3668 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3672 heap_->string_table()->Iterate(&updating_visitor);
3673 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3674 if (heap_->weak_object_to_code_table()->IsHashTable()) {
3675 WeakHashTable* table =
3676 WeakHashTable::cast(heap_->weak_object_to_code_table());
3677 table->Iterate(&updating_visitor);
3678 table->Rehash(heap_->isolate()->factory()->undefined_value());
3681 // Update pointers from external string table.
3682 heap_->UpdateReferencesInExternalStringTable(
3683 &UpdateReferenceInExternalStringTableEntry);
3685 EvacuationWeakObjectRetainer evacuation_object_retainer;
3686 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3688 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3689 // under it.
3690 ProcessInvalidatedCode(&updating_visitor);
3692 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3694 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3695 DCHECK(migration_slots_buffer_ == NULL);
3699 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3700 int npages = evacuation_candidates_.length();
3701 for (int i = 0; i < npages; i++) {
3702 Page* p = evacuation_candidates_[i];
3703 if (!p->IsEvacuationCandidate()) continue;
3705 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3706 p->InsertAfter(space->LastPage());
3711 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3712 int npages = evacuation_candidates_.length();
3713 for (int i = 0; i < npages; i++) {
3714 Page* p = evacuation_candidates_[i];
3715 if (!p->IsEvacuationCandidate()) continue;
3716 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3717 space->Free(p->area_start(), p->area_size());
3718 p->set_scan_on_scavenge(false);
3719 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3720 p->ResetLiveBytes();
3721 space->ReleasePage(p);
3723 evacuation_candidates_.Rewind(0);
3724 compacting_ = false;
3725 heap()->FreeQueuedChunks();
3729 static const int kStartTableEntriesPerLine = 5;
3730 static const int kStartTableLines = 171;
3731 static const int kStartTableInvalidLine = 127;
3732 static const int kStartTableUnusedEntry = 126;
3734 #define _ kStartTableUnusedEntry
3735 #define X kStartTableInvalidLine
3736 // Mark-bit to object start offset table.
3738 // The line is indexed by the mark bits in a byte. The first number on
3739 // the line describes the number of live object starts for the line, and the
3740 // other numbers on the line describe the offsets (in words) of the object
3741 // starts.
3743 // Since objects are at least 2 words large, we don't have entries for two
3744 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
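// A worked example of the encoding (derived from the description above, not
// copied from the table): for the mark-bit byte 0b00100101, objects start at
// word offsets 0, 2 and 5 within the 8-word run, so the matching line would
// read {3, 0, 2, 5, _} -- the count followed by the three offsets, padded
// with unused entries.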
3745 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
4093 // Takes a word of mark bits. Returns the number of objects that start in the
4094 // range. Puts the word offsets of those object starts in the supplied array.
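// For example (hypothetical input, assuming the table encoding above): with
// mark_bits == 0x00000401 -- bits 0 and 10 set -- the function returns 2 and
// writes the word offsets {0, 10} into starts[].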
4095 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
4099 // No consecutive 1 bits.
4100 DCHECK((mark_bits & 0x180) != 0x180);
4101 DCHECK((mark_bits & 0x18000) != 0x18000);
4102 DCHECK((mark_bits & 0x1800000) != 0x1800000);
4104 while (mark_bits != 0) {
4105 int byte = (mark_bits & 0xff);
4108 DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
4109 char* table = kStartTable + byte * kStartTableEntriesPerLine;
4110 int objects_in_these_8_words = table[0];
4111 DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
4112 DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
4113 for (int i = 0; i < objects_in_these_8_words; i++) {
4114 starts[objects++] = offset + table[1 + i];
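// Given the base address of the last 32-word cell known to have contained a
// live object (the "undigested" free start) and that cell's mark bits,
// compute the exact address where the free region begins, i.e. just past the
// end of the last live object in that cell.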
4123 static inline Address DigestFreeStart(Address approximate_free_start,
4124 uint32_t free_start_cell) {
4125 DCHECK(free_start_cell != 0);
4127 // No consecutive 1 bits.
4128 DCHECK((free_start_cell & (free_start_cell << 1)) == 0);
4131 uint32_t cell = free_start_cell;
4132 int offset_of_last_live;
4133 if ((cell & 0x80000000u) != 0) {
4134 // This case would overflow below.
4135 offset_of_last_live = 31;
4137 // Remove all but one bit, the most significant. This is an optimization
4138 // that may or may not be worthwhile.
4144 cell = (cell + 1) >> 1;
4145 int live_objects = MarkWordToObjectStarts(cell, offsets);
4146 DCHECK(live_objects == 1);
4147 offset_of_last_live = offsets[live_objects - 1];
4149 Address last_live_start =
4150 approximate_free_start + offset_of_last_live * kPointerSize;
4151 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
4152 Address free_start = last_live_start + last_live->Size();
4157 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
4160 // No consecutive 1 bits.
4161 DCHECK((cell & (cell << 1)) == 0);
4164 if (cell == 0x80000000u) { // Avoid overflow below.
4165 return block_address + 31 * kPointerSize;
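// The expression below isolates the lowest set bit of |cell|:
// (cell ^ (cell - 1)) sets every bit up to and including the lowest set bit,
// and adding one then shifting right leaves exactly that bit (equivalent to
// cell & -cell).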
4167 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
4168 DCHECK((first_set_bit & cell) == first_set_bit);
4169 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
4170 DCHECK(live_objects == 1);
4172 return block_address + offsets[0] * kPointerSize;
4176 // Force instantiation of templatized SweepConservatively method for
4177 // SWEEP_ON_MAIN_THREAD mode.
4178 template int MarkCompactCollector::SweepConservatively<
4179 MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(PagedSpace*, FreeList*, Page*);
4182 // Force instantiation of templatized SweepConservatively method for
4183 // SWEEP_IN_PARALLEL mode.
4184 template int MarkCompactCollector::SweepConservatively<
4185 MarkCompactCollector::SWEEP_IN_PARALLEL>(PagedSpace*, FreeList*, Page*);
4188 // Sweeps a space conservatively. After this has been done, the larger free
4189 // spaces have been put on the free list and the smaller ones have been
4190 // ignored and left untouched. A free space is always either ignored or put
4191 // on the free list, never split up into two parts. This is important
4192 // because it means that any FreeSpace maps left actually describe a region of
4193 // memory that can be ignored when scanning. Dead objects other than free
4194 // spaces will not contain the free space map.
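// A note on granularity: the code below only hands a gap to the free list
// once it looks larger than 32 words (one mark-bit cell); smaller gaps are
// the "ignored" free spaces mentioned above.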
4195 template <MarkCompactCollector::SweepingParallelism mode>
4196 int MarkCompactCollector::SweepConservatively(PagedSpace* space,
4197 FreeList* free_list, Page* p) {
4198 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
4200 (mode == MarkCompactCollector::SWEEP_IN_PARALLEL && free_list != NULL) ||
4201 (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
4202 free_list == NULL));
4204 intptr_t freed_bytes = 0;
4205 intptr_t max_freed_bytes = 0;
4208 // Skip over all the dead objects at the start of the page and mark them free.
4209 Address cell_base = 0;
4210 MarkBit::CellType* cell = NULL;
4211 MarkBitCellIterator it(p);
4212 for (; !it.Done(); it.Advance()) {
4213 cell_base = it.CurrentCellBase();
4214 cell = it.CurrentCell();
4215 if (*cell != 0) break;
4219 size = p->area_end() - p->area_start();
4221 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
4222 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4223 DCHECK_EQ(0, p->LiveBytes());
4224 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
4225 // When concurrent sweeping is active, the page will be marked after
4226 // sweeping by the main thread.
4227 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
4229 p->MarkSweptConservatively();
4231 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
4234 // Grow the size of the start-of-page free space a little to get up to the
4235 // first live object.
4236 Address free_end = StartOfLiveObject(cell_base, *cell);
4237 // Free the first free space.
4238 size = free_end - p->area_start();
4240 Free<mode>(space, free_list, p->area_start(), static_cast<int>(size));
4241 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4243 // The start of the current free area is represented in undigested form by
4244 // the address of the last 32-word section that contained a live object and
4245 // the marking bitmap for that cell, which describes where the live object
4246 // started. Unless we find a large free space in the bitmap we will not
4247 // digest this pair into a real address. We start the iteration here at the
4248 // first word in the marking bit map that indicates a live object.
4249 Address free_start = cell_base;
4250 MarkBit::CellType free_start_cell = *cell;
4252 for (; !it.Done(); it.Advance()) {
4253 cell_base = it.CurrentCellBase();
4254 cell = it.CurrentCell();
4256 // We have a live object. Check approximately whether it is more than 32
4257 // words since the last live object.
4258 if (cell_base - free_start > 32 * kPointerSize) {
4259 free_start = DigestFreeStart(free_start, free_start_cell);
4260 if (cell_base - free_start > 32 * kPointerSize) {
4261 // Now that we know the exact start of the free space it still looks
4262 // like we have a large enough free space to be worth bothering with,
4263 // so now we need to find the start of the first live object at the
4264 // end of the free space.
4265 free_end = StartOfLiveObject(cell_base, *cell);
4266 freed_bytes = Free<mode>(space, free_list, free_start,
4267 static_cast<int>(free_end - free_start));
4268 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4271 // Update our undigested record of where the current free area started.
4272 free_start = cell_base;
4273 free_start_cell = *cell;
4274 // Clear marking bits for current cell.
4279 // Handle the free space at the end of the page.
4280 if (cell_base - free_start > 32 * kPointerSize) {
4281 free_start = DigestFreeStart(free_start, free_start_cell);
4282 freed_bytes = Free<mode>(space, free_list, free_start,
4283 static_cast<int>(p->area_end() - free_start));
4284 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4287 p->ResetLiveBytes();
4288 if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
4289 // When concurrent sweeping is active, the page will be marked after
4290 // sweeping by the main thread.
4291 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
4293 p->MarkSweptConservatively();
4295 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
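// Sweeps pages of |space| in parallel, stopping early once a page yields at
// least |required_freed_bytes| (pass 0 to sweep up to end_of_unswept_pages()
// unconditionally). The return value is the largest single free block, in
// guaranteed-allocatable bytes, seen while sweeping.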
4299 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4300 int required_freed_bytes) {
4302 int max_freed_overall = 0;
4303 PageIterator it(space);
4304 while (it.has_next()) {
4305 Page* p = it.next();
4306 max_freed = SweepInParallel(p, space);
4307 DCHECK(max_freed >= 0);
4308 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
4311 max_freed_overall = Max(max_freed, max_freed_overall);
4312 if (p == space->end_of_unswept_pages()) break;
4314 return max_freed_overall;
4318 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
4320 if (page->TryParallelSweeping()) {
4321 FreeList* free_list = space == heap()->old_pointer_space()
4322 ? free_list_old_pointer_space_.get()
4323 : free_list_old_data_space_.get();
4324 FreeList private_free_list(space);
4325 if (space->swept_precisely()) {
4326 max_freed = SweepPrecisely<SWEEP_ONLY, SWEEP_IN_PARALLEL,
4327 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4328 space, &private_free_list, page, NULL);
4330 max_freed = SweepConservatively<SWEEP_IN_PARALLEL>(
4331 space, &private_free_list, page);
4333 free_list->Concatenate(&private_free_list);
4339 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4340 space->set_swept_precisely(sweeper == PRECISE ||
4341 sweeper == CONCURRENT_PRECISE ||
4342 sweeper == PARALLEL_PRECISE);
4343 space->ClearStats();
4345 // We defensively initialize end_of_unswept_pages_ here with the first page
4346 // of the pages list.
4347 space->set_end_of_unswept_pages(space->FirstPage());
4349 PageIterator it(space);
4351 int pages_swept = 0;
4352 bool unused_page_present = false;
4353 bool parallel_sweeping_active = false;
4355 while (it.has_next()) {
4356 Page* p = it.next();
4357 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4359 // Clear the sweeping flags; this indicates that the mark bits are still intact.
4360 p->ClearSweptPrecisely();
4361 p->ClearSweptConservatively();
4363 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4364 p->IsEvacuationCandidate()) {
4365 // Will be processed in EvacuateNewSpaceAndCandidates.
4366 DCHECK(evacuation_candidates_.length() > 0);
4370 // One unused page is kept; all further unused pages are released rather than swept.
4371 if (p->LiveBytes() == 0) {
4372 if (unused_page_present) {
4373 if (FLAG_gc_verbose) {
4374 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4375 reinterpret_cast<intptr_t>(p));
4377 // Adjust unswept free bytes because releasing a page expects said
4378 // counter to be accurate for unswept pages.
4379 space->IncreaseUnsweptFreeBytes(p);
4380 space->ReleasePage(p);
4383 unused_page_present = true;
4387 case CONCURRENT_CONSERVATIVE:
4388 case PARALLEL_CONSERVATIVE: {
4389 if (!parallel_sweeping_active) {
4390 if (FLAG_gc_verbose) {
4391 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4392 reinterpret_cast<intptr_t>(p));
4394 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4396 parallel_sweeping_active = true;
4398 if (FLAG_gc_verbose) {
4399 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4400 reinterpret_cast<intptr_t>(p));
4402 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4403 space->IncreaseUnsweptFreeBytes(p);
4405 space->set_end_of_unswept_pages(p);
4408 case CONCURRENT_PRECISE:
4409 case PARALLEL_PRECISE:
4410 if (!parallel_sweeping_active) {
4411 if (FLAG_gc_verbose) {
4412 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4413 reinterpret_cast<intptr_t>(p));
4415 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4416 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4418 parallel_sweeping_active = true;
4420 if (FLAG_gc_verbose) {
4421 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4422 reinterpret_cast<intptr_t>(p));
4424 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4425 space->IncreaseUnsweptFreeBytes(p);
4427 space->set_end_of_unswept_pages(p);
4430 if (FLAG_gc_verbose) {
4431 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4432 reinterpret_cast<intptr_t>(p));
4434 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4435 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4436 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4437 } else if (space->identity() == CODE_SPACE) {
4438 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4439 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4441 SweepPrecisely<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4442 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
4447 default: { UNREACHABLE(); }
4451 if (FLAG_gc_verbose) {
4452 PrintF("SweepSpace: %s (%d pages swept)\n",
4453 AllocationSpaceName(space->identity()), pages_swept);
4456 // Give pages that are queued to be freed back to the OS.
4457 heap()->FreeQueuedChunks();
4461 static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
4462 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4463 type == MarkCompactCollector::CONCURRENT_CONSERVATIVE ||
4464 type == MarkCompactCollector::PARALLEL_PRECISE ||
4465 type == MarkCompactCollector::CONCURRENT_PRECISE;
4469 static bool ShouldWaitForSweeperThreads(
4470 MarkCompactCollector::SweeperType type) {
4471 return type == MarkCompactCollector::PARALLEL_CONSERVATIVE ||
4472 type == MarkCompactCollector::PARALLEL_PRECISE;
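// Note the asymmetry: the CONCURRENT_* modes start sweeper threads but do not
// wait for them in SweepSpaces(); their sweeping overlaps with the mutator
// and is only finished later (e.g. via EnsureSweepingCompleted()).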
4476 void MarkCompactCollector::SweepSpaces() {
4477 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4478 double start_time = 0.0;
4479 if (FLAG_print_cumulative_gc_stat) {
4480 start_time = base::OS::TimeCurrentMillis();
4484 state_ = SWEEP_SPACES;
4486 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
4487 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4488 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4489 if (FLAG_always_precise_sweeping && FLAG_parallel_sweeping) {
4490 how_to_sweep = PARALLEL_PRECISE;
4492 if (FLAG_always_precise_sweeping && FLAG_concurrent_sweeping) {
4493 how_to_sweep = CONCURRENT_PRECISE;
4495 if (sweep_precisely_) how_to_sweep = PRECISE;
4497 MoveEvacuationCandidatesToEndOfPagesList();
4499 // Noncompacting collections simply sweep the spaces to clear the mark
4500 // bits and free the nonlive blocks (for old and map spaces). We sweep
4501 // the map space last because freeing non-live maps overwrites them and
4502 // the other spaces rely on possibly non-live maps to get the sizes for
4503 // non-live objects.
4505 GCTracer::Scope sweep_scope(heap()->tracer(),
4506 GCTracer::Scope::MC_SWEEP_OLDSPACE);
4508 SequentialSweepingScope scope(this);
4509 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4510 SweepSpace(heap()->old_data_space(), how_to_sweep);
4513 if (ShouldStartSweeperThreads(how_to_sweep)) {
4514 StartSweeperThreads();
4517 if (ShouldWaitForSweeperThreads(how_to_sweep)) {
4518 EnsureSweepingCompleted();
4521 RemoveDeadInvalidatedCode();
4524 GCTracer::Scope sweep_scope(heap()->tracer(),
4525 GCTracer::Scope::MC_SWEEP_CODE);
4526 SweepSpace(heap()->code_space(), PRECISE);
4530 GCTracer::Scope sweep_scope(heap()->tracer(),
4531 GCTracer::Scope::MC_SWEEP_CELL);
4532 SweepSpace(heap()->cell_space(), PRECISE);
4533 SweepSpace(heap()->property_cell_space(), PRECISE);
4536 EvacuateNewSpaceAndCandidates();
4538 // ClearNonLiveTransitions depends on precise sweeping of the map space to
4539 // detect whether an unmarked map became dead in this collection or in one
4540 // of the previous ones.
4542 GCTracer::Scope sweep_scope(heap()->tracer(),
4543 GCTracer::Scope::MC_SWEEP_MAP);
4544 SweepSpace(heap()->map_space(), PRECISE);
4547 // Deallocate unmarked objects and clear marked bits for marked objects.
4548 heap_->lo_space()->FreeUnmarkedObjects();
4550 // Deallocate evacuated candidate pages.
4551 ReleaseEvacuationCandidates();
4553 if (FLAG_print_cumulative_gc_stat) {
4554 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
4560 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4561 PageIterator it(space);
4562 while (it.has_next()) {
4563 Page* p = it.next();
4564 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4565 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4566 if (space->swept_precisely()) {
4567 p->MarkSweptPrecisely();
4569 p->MarkSweptConservatively();
4572 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4577 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4578 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4579 ParallelSweepSpaceComplete(heap()->old_data_space());
4583 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4584 if (isolate()->debug()->is_loaded() ||
4585 isolate()->debug()->has_break_points()) {
4590 if (code_flusher_ != NULL) return;
4591 code_flusher_ = new CodeFlusher(isolate());
4593 if (code_flusher_ == NULL) return;
4594 code_flusher_->EvictAllCandidates();
4595 delete code_flusher_;
4596 code_flusher_ = NULL;
4599 if (FLAG_trace_code_flushing) {
4600 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4605 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4606 // Our profiling tools do not expect intersections between
4607 // code objects. We should either reenable it or change our tools.
4608 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4610 if (obj->IsCode()) {
4611 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4616 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4619 void MarkCompactCollector::Initialize() {
4620 MarkCompactMarkingVisitor::Initialize();
4621 IncrementalMarking::Initialize();
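// A typed slot occupies two consecutive entries in the buffer: first the
// SlotType stored as a small integer (always below NUMBER_OF_SLOT_TYPES, so
// it cannot be mistaken for a real heap address), then the raw address it
// refers to; see AddTo() and UpdateSlots() below.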
4625 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4626 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4630 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4631 SlotsBuffer** buffer_address, SlotType type,
4632 Address addr, AdditionMode mode) {
4633 SlotsBuffer* buffer = *buffer_address;
4634 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4635 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4636 allocator->DeallocateChain(buffer_address);
4639 buffer = allocator->AllocateBuffer(buffer);
4640 *buffer_address = buffer;
4642 DCHECK(buffer->HasSpaceForTypedSlot());
4643 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4644 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4649 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4650 if (RelocInfo::IsCodeTarget(rmode)) {
4651 return SlotsBuffer::CODE_TARGET_SLOT;
4652 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4653 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4654 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4655 return SlotsBuffer::DEBUG_TARGET_SLOT;
4656 } else if (RelocInfo::IsJSReturn(rmode)) {
4657 return SlotsBuffer::JS_RETURN_SLOT;
4660 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4664 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4665 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4666 RelocInfo::Mode rmode = rinfo->rmode();
4667 if (target_page->IsEvacuationCandidate() &&
4668 (rinfo->host() == NULL ||
4669 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4671 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4672 // This doesn't need to be typed since it is just a normal heap pointer.
4673 Object** target_pointer =
4674 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4675 success = SlotsBuffer::AddTo(
4676 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4677 target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
4678 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4679 success = SlotsBuffer::AddTo(
4680 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4681 SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
4682 SlotsBuffer::FAIL_ON_OVERFLOW);
4684 success = SlotsBuffer::AddTo(
4685 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4686 SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
4689 EvictEvacuationCandidate(target_page);
4695 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4696 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4697 if (target_page->IsEvacuationCandidate() &&
4698 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4699 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4700 target_page->slots_buffer_address(),
4701 SlotsBuffer::CODE_ENTRY_SLOT, slot,
4702 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4703 EvictEvacuationCandidate(target_page);
4709 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4710 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4711 if (is_compacting()) {
4713 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
4715 MarkBit mark_bit = Marking::MarkBitFrom(host);
4716 if (Marking::IsBlack(mark_bit)) {
4717 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4718 RecordRelocSlot(&rinfo, target);
4724 static inline SlotsBuffer::SlotType DecodeSlotType(
4725 SlotsBuffer::ObjectSlot slot) {
4726 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4730 void SlotsBuffer::UpdateSlots(Heap* heap) {
4731 PointersUpdatingVisitor v(heap);
4733 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4734 ObjectSlot slot = slots_[slot_idx];
4735 if (!IsTypedSlot(slot)) {
4736 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4739 DCHECK(slot_idx < idx_);
4740 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4741 reinterpret_cast<Address>(slots_[slot_idx]));
4747 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4748 PointersUpdatingVisitor v(heap);
4750 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4751 ObjectSlot slot = slots_[slot_idx];
4752 if (!IsTypedSlot(slot)) {
4753 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4754 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4758 DCHECK(slot_idx < idx_);
4759 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4760 if (!IsOnInvalidatedCodeObject(pc)) {
4761 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4762 reinterpret_cast<Address>(slots_[slot_idx]));
4769 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4770 return new SlotsBuffer(next_buffer);
4774 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4779 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4780 SlotsBuffer* buffer = *buffer_address;
4781 while (buffer != NULL) {
4782 SlotsBuffer* next_buffer = buffer->next();
4783 DeallocateBuffer(buffer);
4784 buffer = next_buffer;
4786 *buffer_address = NULL;
4789 } // namespace v8::internal