// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap-profiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
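
// Illustrative note (not in the original file): each heap object owns two
// adjacent bits in its page's marking bitmap, and the patterns above read as
// (mark bit, second bit):
//
//   00  white      - not yet reached by the marker
//   10  black      - reached and scanned
//   11  grey       - reached but still waiting on the marking deque
//   01  impossible - never produced by the collector
//
// A minimal sketch of the colour tests used throughout this file, assuming
// the MarkBit::Get()/Next() accessors that appear below:
//
//   bool IsWhiteSketch(MarkBit b) { return !b.Get(); }
//   bool IsGreySketch(MarkBit b)  { return b.Get() && b.Next().Get(); }
//   bool IsBlackSketch(MarkBit b) { return b.Get() && !b.Next().Get(); }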
// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      sweeping_in_progress_(false),
      pending_sweeper_jobs_semaphore_(0),
      sequential_sweeping_(false),
      migration_slots_buffer_(NULL),
      have_code_to_deoptimize_(false) {

class VerifyMarkingVisitor : public ObjectVisitor {
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();

  void VisitCell(RelocInfo* rinfo) {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);

static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();

static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
      NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);

static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    VerifyMarking(space->heap(), p->area_start(), p->area_end());

static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

class VerifyEvacuationVisitor : public ObjectVisitor {
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));

static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page, NULL);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
      heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);

static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();

static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding &&
      (space == heap->old_pointer_space() || space == heap->old_data_space())) {

  PageIterator it(space);
  while (it.has_next()) {
    if (p->IsEvacuationCandidate()) continue;

static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_pointer_space());
  VerifyEvacuation(heap, heap->old_data_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->cell_space());
  VerifyEvacuation(heap, heap->property_cell_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
#endif  // VERIFY_HEAP

void MarkCompactCollector::SetUp() {
  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));

void MarkCompactCollector::TearDown() { AbortCompaction(); }

void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);

static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
      AllocationSpaceName(space->identity()), number_of_pages,
      static_cast<int>(free), static_cast<double>(free) * 100 / reserved);

bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  DCHECK(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
  // If the GDBJIT interface is active, disable compaction.
  if (FLAG_gdbjit) return false;

  CollectEvacuationCandidates(heap()->old_pointer_space());
  CollectEvacuationCandidates(heap()->old_data_space());

  if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
      FLAG_incremental_code_compaction)) {
    CollectEvacuationCandidates(heap()->code_space());
  } else if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->code_space());

  if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->map_space());
    TraceFragmentation(heap()->cell_space());
    TraceFragmentation(heap()->property_cell_space());

  heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
  heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
  heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

  compacting_ = evacuation_candidates_.length() > 0;

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  DCHECK(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ProcessAndClearWeakCells();

  ClearWeakCollections();

  heap_->set_encountered_weak_cells(Smi::FromInt(0));

  if (FLAG_verify_heap) {
    VerifyMarking(heap_);

  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInCode();

  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;

void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->property_cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());

void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
      obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();

void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
#endif  // VERIFY_HEAP

static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());

static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());

void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();

class MarkCompactCollector::SweeperTask : public v8::Task {
  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}

  virtual ~SweeperTask() {}

  // v8::Task overrides.
  virtual void Run() OVERRIDE {
    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);

void MarkCompactCollector::StartSweeperThreads() {
  DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
  DCHECK(free_list_old_data_space_.get()->IsEmpty());
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), heap()->old_data_space()),
      v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), heap()->old_pointer_space()),
      v8::Platform::kShortRunningTask);

void MarkCompactCollector::EnsureSweepingCompleted() {
  DCHECK(sweeping_in_progress_ == true);
  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (FLAG_predictable || !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);

  // Wait twice for both jobs.
  if (!FLAG_predictable) {
    pending_sweeper_jobs_semaphore_.Wait();
    pending_sweeper_jobs_semaphore_.Wait();

  ParallelSweepSpacesComplete();
  sweeping_in_progress_ = false;
  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();

  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);

bool MarkCompactCollector::IsSweepingCompleted() {
  if (!pending_sweeper_jobs_semaphore_.WaitFor(
      base::TimeDelta::FromSeconds(0))) {
  pending_sweeper_jobs_semaphore_.Signal();

void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
  if (space == heap()->old_pointer_space()) {
    free_list = free_list_old_pointer_space_.get();
  } else if (space == heap()->old_data_space()) {
    free_list = free_list_old_data_space_.get();
    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
    // to only refill them for old data and pointer spaces.

  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
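
// Illustrative note (not in the original file): the background sweeper tasks
// above do not touch a space's own free list directly. They accumulate free
// blocks into the collector-owned free_list_old_{data,pointer}_space_ lists,
// and RefillFreeList() later splices that private list into the space,
// crediting the accounting stats and shrinking the unswept-free-bytes count,
// for example:
//
//   RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
//   RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
//
// which is how EnsureSweepingCompleted() drains both lists once the two
// background jobs have signalled the semaphore.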
void Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
      MemoryChunk::FromAddress(new_start));

  if (!heap_->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

  ObjectColor old_color = Color(old_mark_bit);

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    DCHECK(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
  } else if (Marking::IsGrey(old_mark_bit)) {
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    DCHECK(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();

  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
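
// Illustrative note (not in the original file): TransferMark keeps the mark
// bits consistent when an object's start address moves within the same page,
// for example when an array is trimmed in place during incremental marking.
// A hedged usage sketch, where 'marking' is a Marking helper bound to this
// heap (hypothetical local, accessor name not shown in this excerpt):
//
//   Address old_start = object->address();
//   Address new_start = old_start + bytes_removed;  // same MemoryChunk
//   marking.TransferMark(old_start, new_start);
//
// A black object simply has its bit moved; a grey object is re-pushed onto
// the incremental marking deque so it is rescanned starting from new_start.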
const char* AllocationSpaceName(AllocationSpace space) {
    case OLD_POINTER_SPACE:
      return "OLD_POINTER_SPACE";
      return "OLD_DATA_SPACE";
    case PROPERTY_CELL_SPACE:
      return "PROPERTY_CELL_SPACE";

// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise a positive integer that gives an
// estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
          AllocationSpaceName(space->identity()), p->LiveBytes());

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
    ratio_threshold = 10;
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
    ratio_threshold = 15;

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
        reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
        static_cast<int>(sizes.small_size_),
        static_cast<double>(sizes.small_size_ * 100) / area_size,
        static_cast<int>(sizes.medium_size_),
        static_cast<double>(sizes.medium_size_ * 100) / area_size,
        static_cast<int>(sizes.large_size_),
        static_cast<double>(sizes.large_size_ * 100) / area_size,
        static_cast<int>(sizes.huge_size_),
        static_cast<double>(sizes.huge_size_ * 100) / area_size,
        (ratio > ratio_threshold) ? "[fragmented]" : "");

  if (FLAG_always_compact && sizes.Total() != area_size) {

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
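
// Illustrative worked example (not in the original file), using assumed
// numbers for a non-code page with a 1 MB usable area:
//
//   small_size_  =  40 KB  ->  40 KB * 5 = 200 KB weighted
//   medium_size_ =  60 KB  ->  60 KB * 1 =  60 KB weighted
//   ratio        = 260 KB * 100 / 1024 KB ~= 25
//
// With ratio_threshold == 15 the page scores 25 - 15 = 10, i.e. a mild
// compaction candidate; anything at or below the threshold returns 0 and is
// left alone.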
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_POINTER_SPACE ||
      space->identity() == OLD_DATA_SPACE ||
      space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;

    Candidate() : fragmentation_(0), page_(NULL) {}
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free. We expect that half-empty pages
    // are easier to compact so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages. We expect that almost empty
    // pages are even easier to compact so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
        "evacuation candidate limit: %d\n",
        static_cast<double>(over_reserved) / MB,
        static_cast<double>(reserved) / MB,
        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
            AllocationSpaceName(space->identity()),
            static_cast<int>(free_bytes),
            static_cast<double>(free_bytes * 100) / p->area_size(),
            (fragmentation > 0) ? "[fragmented]" : "");

      fragmentation = FreeListFragmentation(space, p);

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
        for (int i = 0; i < max_evacuation_candidates; i++) {
            candidates[i].fragmentation() < least->fragmentation()) {
            least = candidates + i;

        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n", count,
        AllocationSpaceName(space->identity()));

void MarkCompactCollector::AbortCompaction() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ClearEvacuationCandidate();
    p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

  evacuation_candidates_.Rewind(0);
  invalidated_code_.Rewind(0);

  DCHECK_EQ(0, evacuation_candidates_.length());

void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  DCHECK(state_ == IDLE);

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    AbortWeakCollections();
    was_marked_incrementally_ = false;
  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
      space = spaces.next()) {
    space->PrepareForMarkCompact();

  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();

void MarkCompactCollector::Finish() {
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.

// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.

// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
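
// Illustrative sketch (not in the original file) of the drain/refill loop the
// comment above describes, mirroring the shape used by MarkRoots() further
// down in this file:
//
//   EmptyMarkingDeque();                  // drain what fits on the deque
//   while (marking_deque_.overflowed()) {
//     RefillMarkingDeque();               // rescan heap for overflowed objects
//     EmptyMarkingDeque();                // and drain again
//   }
//
// Overflowed objects are marked both "marked" and "overflowed" so that the
// refill pass can find them by scanning mark bits rather than by tracing.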
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
      candidate->set_code(code);

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(
        shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;

  jsfunction_candidates_head_ = NULL;

void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      candidate->set_code(lazy_compile);

        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,

    candidate = next_candidate;

  shared_function_info_candidates_head_ = NULL;

void CodeFlusher::ProcessOptimizedCodeMaps() {
  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;

  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    int new_length = SharedFunctionInfo::kEntriesStart;
    int old_length = code_map->length();
    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
        i += SharedFunctionInfo::kEntryLength) {
          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
      if (!Marking::MarkBitFrom(code).Get()) continue;

      // Move every slot in the entry.
      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
        int dst_index = new_length++;
        Object** slot = code_map->RawFieldOfElementAt(dst_index);
        Object* object = code_map->get(i + j);
        code_map->set(dst_index, object);
        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
          DCHECK(object->IsSmi());
              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,

    // Trim the optimized code map if entries have been removed.
    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);

    holder = next_holder;

  optimized_code_map_holder_head_ = NULL;

void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);

      candidate = next_candidate;

void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);

      candidate = next_candidate;

void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
      ->get(SharedFunctionInfo::kNextMapIndex)

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons code-map: ");
    code_map_holder->ShortPrint();

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  if (holder == code_map_holder) {
    next_holder = GetNextCodeMap(code_map_holder);
    optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
    while (holder != NULL) {
      next_holder = GetNextCodeMap(holder);

      if (next_holder == code_map_holder) {
        next_holder = GetNextCodeMap(code_map_holder);
        SetNextCodeMap(holder, next_holder);
        ClearNextCodeMap(code_map_holder);

      holder = next_holder;

void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;

  DCHECK(jsfunction_candidates_head_ == NULL);

void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;

  DCHECK(shared_function_info_candidates_head_ == NULL);

void CodeFlusher::EvictOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    EvictOptimizedCodeMap(holder);
    holder = next_holder;

  DCHECK(optimized_code_map_holder_head_ == NULL);

void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));

    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);

MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;

static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-internalized
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.

  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).

  // The check performed is:
  //   object->IsConsString() && !object->IsInternalizedString() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be

  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if (!IsShortcutCandidate(type)) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  return HeapObject::cast(first);
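
// Illustrative example (not in the original file): if a slot holds a
// non-internalized cons string whose right half has become the empty string,
//
//   before:  slot -> ConsString { first: "payload", second: "" }
//   after:   slot -> "payload"
//
// the marker rewrites the slot to point directly at the left half, so the
// wrapper never has to be marked or evacuated. The new-space check above
// keeps the shortcut from creating an unrecorded old-to-new pointer.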
class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,

  static void ObjectStatsCountFixedArray(
      FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
      FixedArraySubInstanceType dictionary_type);

  template <MarkCompactMarkingVisitor::VisitorId id>
  class ObjectStatsTracker {
    static inline void Visit(Map* map, HeapObject* obj);

  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.

    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (!mark_bit.Get()) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
      Object** anchor_slot, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);

  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));

    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);

  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
      heap->mark_compact_collector()->RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_one_byte),
          Smi::FromInt(heap->sweep_generation() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_one_byte),
            Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
            Smi::FromInt(JSRegExp::kUninitializedValue));

  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);

    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both one byte and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExp(map, object);
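
  // Illustrative worked example (not in the original file) of the ageing
  // check in UpdateRegExpCodeAgeAndFlush, using assumed generation numbers:
  //
  //   sweep_generation() == 42 when the marker finds compiled RegExp code
  //     -> the code is saved aside and the code_index slot is stamped with
  //        Smi 42 (42 & 0xff)
  //   sweep_generation() == 47 at a later mark-sweep
  //     -> (47 - kRegExpCodeThreshold) & 0xff == 42 matches the stamp, so
  //        both slots are reset to kUninitializedValue and the code must be
  //        recompiled on next use.
  //
  // The & 0xff keeps the stored value in the 0-255 range (no smi overflow)
  // and makes the comparison wrap correctly when the generation counter wraps.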
  static VisitorDispatchTable<Callback> non_count_table_;

void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
    FixedArraySubInstanceType dictionary_type) {
  Heap* heap = fixed_array->map()->GetHeap();
  if (fixed_array->map() != heap->fixed_cow_array_map() &&
      fixed_array->map() != heap->fixed_double_array_map() &&
      fixed_array != heap->empty_fixed_array()) {
    if (fixed_array->IsDictionary()) {
      heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
      heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());

void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
  Heap* heap = map->GetHeap();
  int object_size = obj->Size();
  heap->RecordObjectStats(map->instance_type(), object_size);
  non_count_table_.GetVisitorById(id)(map, obj);
  if (obj->IsJSObject()) {
    JSObject* object = JSObject::cast(obj);
    ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
        FAST_ELEMENTS_SUB_TYPE);
    ObjectStatsCountFixedArray(object->properties(),
        DICTIONARY_PROPERTIES_SUB_TYPE,
        FAST_PROPERTIES_SUB_TYPE);

template <MarkCompactMarkingVisitor::VisitorId id>
void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
  ObjectStatsVisitBase(id, map, obj);

class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitMap> {
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    Map* map_obj = Map::cast(obj);
    DCHECK(map->instance_type() == MAP_TYPE);
    DescriptorArray* array = map_obj->instance_descriptors();
    if (map_obj->owns_descriptors() &&
        array != heap->empty_descriptor_array()) {
      int fixed_array_size = array->Size();
      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
    if (map_obj->HasTransitionArray()) {
      int fixed_array_size = map_obj->transitions()->Size();
      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
    if (map_obj->has_code_cache()) {
      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
          cache->default_cache()->Size());
      if (!cache->normal_type_cache()->IsUndefined()) {
        heap->RecordFixedArraySubTypeStats(
            MAP_CODE_CACHE_SUB_TYPE,
            FixedArray::cast(cache->normal_type_cache())->Size());
    ObjectStatsVisitBase(kVisitMap, map, obj);

class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitCode> {
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    int object_size = obj->Size();
    DCHECK(map->instance_type() == CODE_TYPE);
    Code* code_obj = Code::cast(obj);
    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
    ObjectStatsVisitBase(kVisitCode, map, obj);

class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
    if (sfi->scope_info() != heap->empty_fixed_array()) {
      heap->RecordFixedArraySubTypeStats(
          SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);

class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitFixedArray> {
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    FixedArray* fixed_array = FixedArray::cast(obj);
    if (fixed_array == heap->string_table()) {
      heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
          fixed_array->Size());
    ObjectStatsVisitBase(kVisitFixedArray, map, obj);

void MarkCompactMarkingVisitor::Initialize() {
  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);

  if (FLAG_track_gc_object_stats) {
    // Copy the visitor table to make call-through possible.
    non_count_table_.CopyFrom(&table_);
#define VISITOR_ID_COUNT_FUNCTION(id) \
  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
#undef VISITOR_ID_COUNT_FUNCTION

VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
    MarkCompactMarkingVisitor::non_count_table_;

class CodeMarkingVisitor : public ThreadVisitor {
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);

  MarkCompactCollector* collector_;

class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);

  MarkCompactCollector* collector_;

void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
    ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization,
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function, and StackFrame::LookupCode will return the
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
          frame->LookupCode());

void MarkCompactCollector::PrepareForCodeFlushing() {
  // Enable code flushing for non-incremental cycles.
  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
    EnableCodeFlushing(!was_marked_incrementally_);

  // If code flushing is disabled, there is no need to prepare for it.
  if (!is_code_flushing_enabled()) return;

  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  // Make sure we are not referencing the code from the stack.
  DCHECK(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
      heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingDeque();

// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);

  // Skip the weak next code link in a code object, which is visited in
  // ProcessTopOptimizedFrame.
  void VisitNextCodeLink(Object** p) {}

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();

    collector_->SetMark(object, mark_bit);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    MarkCompactMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingDeque();

  MarkCompactCollector* collector_;

// Helper class for pruning the string table.
template <bool finalize_external_strings>
class StringTableCleaner : public ObjectVisitor {
  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if (o->IsHeapObject() &&
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
        if (finalize_external_strings) {
          DCHECK(o->IsExternalString());
          heap_->FinalizeExternalString(String::cast(*p));
          pointers_removed_++;
        // Set the entry to the_hole_value (as deleted).
        *p = heap_->the_hole_value();

  int PointersRemoved() {
    DCHECK(!finalize_external_strings);
    return pointers_removed_;

  int pointers_removed_;

typedef StringTableCleaner<false> InternalizedStringTableCleaner;
typedef StringTableCleaner<true> ExternalStringTableCleaner;
// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
    } else if (object->IsAllocationSite() &&
        !(AllocationSite::cast(object)->IsZombie())) {
      // "dead" AllocationSites need to live long enough for a traversal of new
      // space. These sites get a one-time reprieve.
      AllocationSite* site = AllocationSite::cast(object);
      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);

// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
static void DiscoverGreyObjectsWithIterator(Heap* heap,
    MarkingDeque* marking_deque,
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  DCHECK(!marking_deque->IsFull());

  Map* filler_map = heap->one_pointer_filler_map();
  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
    MarkBit markbit = Marking::MarkBitFrom(object);
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
      Marking::GreyToBlack(markbit);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;

static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);

static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
  DCHECK(!marking_deque->IsFull());
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    const MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    MarkBit::CellType grey_objects;
      const MarkBit::CellType next_cell = *(cell + 1);
      grey_objects = current_cell & ((current_cell >> 1) |
          (next_cell << (Bitmap::kBitsPerCell - 1)));
      grey_objects = current_cell & (current_cell >> 1);

    while (grey_objects != 0) {
      int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
      grey_objects >>= trailing_zeros;
      offset += trailing_zeros;
      MarkBit markbit(cell, 1 << offset, false);
      DCHECK(Marking::IsGrey(markbit));
      Marking::GreyToBlack(markbit);
      Address addr = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(addr);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;

      grey_objects >>= (Bitmap::kBitsPerCell - 1);
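
// Illustrative worked example (not in the original file) of the grey-bit
// arithmetic above, shown on an assumed 8-bit cell for brevity (real cells
// are Bitmap::kBitsPerCell wide):
//
//   current_cell      = 0b00011010   object A starts at bit 1 (black: 1,0)
//                                    object B starts at bit 3 (grey:  1,1)
//   current_cell >> 1 = 0b00001101
//   AND               = 0b00001000   -> only B's first mark bit survives
//
// Each set bit in grey_objects therefore marks the start of a grey object;
// CountTrailingZeros32 walks those bits and GreyToBlack flips "11" to "10"
// before the object is pushed on the marking deque. The (next_cell << ...)
// term only matters for an object whose two mark bits straddle a cell
// boundary.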
int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
    NewSpace* new_space, NewSpacePage* p) {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  MarkBit::CellType* cells = p->markbits()->cells();
  int survivors_size = 0;

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    while (current_cell != 0) {
      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
      current_cell >>= trailing_zeros;
      offset += trailing_zeros;
      Address address = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(address);

      int size = object->Size();
      survivors_size += size;

      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);

      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
      if (heap()->ShouldBePromoted(object->address(), size) &&
          TryPromoteObject(object, size)) {

      AllocationResult allocation = new_space->AllocateRaw(size);
      if (allocation.IsRetry()) {
        if (!new_space->AddFreshPage()) {
1867 // Shouldn't happen. We are sweeping linearly, and to-space
1868 // has the same number of pages as from-space, so there is
1869 // always room.
1870 UNREACHABLE();
1872 allocation = new_space->AllocateRaw(size);
1873 DCHECK(!allocation.IsRetry());
1875 Object* target = allocation.ToObjectChecked();
1877 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
1878 heap()->IncrementSemiSpaceCopiedObjectSize(size);
1882 return survivors_size;
1886 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
1887 PagedSpace* space) {
1888 PageIterator it(space);
1889 while (it.has_next()) {
1890 Page* p = it.next();
1891 DiscoverGreyObjectsOnPage(marking_deque, p);
1892 if (marking_deque->IsFull()) return;
1897 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
1898 MarkingDeque* marking_deque) {
1899 NewSpace* space = heap->new_space();
1900 NewSpacePageIterator it(space->bottom(), space->top());
1901 while (it.has_next()) {
1902 NewSpacePage* page = it.next();
1903 DiscoverGreyObjectsOnPage(marking_deque, page);
1904 if (marking_deque->IsFull()) return;
1909 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1911 if (!o->IsHeapObject()) return false;
1912 HeapObject* heap_object = HeapObject::cast(o);
1913 MarkBit mark = Marking::MarkBitFrom(heap_object);
1918 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1921 DCHECK(o->IsHeapObject());
1922 HeapObject* heap_object = HeapObject::cast(o);
1923 MarkBit mark = Marking::MarkBitFrom(heap_object);
1928 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1929 StringTable* string_table = heap()->string_table();
1930 // Mark the string table itself.
1931 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1932 if (!string_table_mark.Get()) {
1933 // String table could have already been marked by visiting the handles list.
1934 SetMark(string_table, string_table_mark);
1936 // Explicitly mark the prefix.
1937 string_table->IteratePrefix(visitor);
1938 ProcessMarkingDeque();
1942 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1943 MarkBit mark_bit = Marking::MarkBitFrom(site);
1944 SetMark(site, mark_bit);
1948 bool MarkCompactCollector::IsMarkingDequeEmpty() {
1949 return marking_deque_.IsEmpty();
1953 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1954 // Mark the heap roots including global variables, stack variables,
1955 // etc., and all objects reachable from them.
1956 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1958 // Handle the string table specially.
1959 MarkStringTable(visitor);
1961 MarkWeakObjectToCodeTable();
1963 // There may be overflowed objects in the heap. Visit them now.
1964 while (marking_deque_.overflowed()) {
1965 RefillMarkingDeque();
1966 EmptyMarkingDeque();
1971 void MarkCompactCollector::MarkImplicitRefGroups() {
1972 List<ImplicitRefGroup*>* ref_groups =
1973 isolate()->global_handles()->implicit_ref_groups();
1976 for (int i = 0; i < ref_groups->length(); i++) {
1977 ImplicitRefGroup* entry = ref_groups->at(i);
1978 DCHECK(entry != NULL);
1980 if (!IsMarked(*entry->parent)) {
1981 (*ref_groups)[last++] = entry;
1985 Object*** children = entry->children;
1986 // A parent object is marked, so mark all child heap objects.
1987 for (size_t j = 0; j < entry->length; ++j) {
1988 if ((*children[j])->IsHeapObject()) {
1989 HeapObject* child = HeapObject::cast(*children[j]);
1990 MarkBit mark = Marking::MarkBitFrom(child);
1991 MarkObject(child, mark);
1995 // Once the entire group has been marked, dispose it because it's
1996 // not needed anymore.
1999 ref_groups->Rewind(last);
2003 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2004 HeapObject* weak_object_to_code_table =
2005 HeapObject::cast(heap()->weak_object_to_code_table());
2006 if (!IsMarked(weak_object_to_code_table)) {
2007 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2008 SetMark(weak_object_to_code_table, mark);
2013 // Mark all objects reachable from the objects on the marking stack.
2014 // Before: the marking stack contains zero or more heap object pointers.
2015 // After: the marking stack is empty, and all objects reachable from the
2016 // marking stack have been marked, or are overflowed in the heap.
2017 void MarkCompactCollector::EmptyMarkingDeque() {
2018 while (!marking_deque_.IsEmpty()) {
2019 HeapObject* object = marking_deque_.Pop();
2020 DCHECK(object->IsHeapObject());
2021 DCHECK(heap()->Contains(object));
2022 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2024 Map* map = object->map();
2025 MarkBit map_mark = Marking::MarkBitFrom(map);
2026 MarkObject(map, map_mark);
2028 MarkCompactMarkingVisitor::IterateBody(map, object);
2033 // Sweep the heap for overflowed objects, clear their overflow bits, and
2034 // push them on the marking stack. Stop early if the marking stack fills
2035 // before sweeping completes. If sweeping completes, there are no remaining
2036 // overflowed objects in the heap so the overflow flag on the marking stack
2037 // is cleared.
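// The Discover* helpers above perform this sweep space by space; each of them
// returns as soon as the deque fills up.  The overflow flag is only cleared
// once a full sweep of all spaces completes (ClearOverflowed() below), so
// ProcessMarkingDeque() keeps draining and refilling until then.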
2038 void MarkCompactCollector::RefillMarkingDeque() {
2039 DCHECK(marking_deque_.overflowed());
2041 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2042 if (marking_deque_.IsFull()) return;
2044 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2045 heap()->old_pointer_space());
2046 if (marking_deque_.IsFull()) return;
2048 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2049 if (marking_deque_.IsFull()) return;
2051 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2052 if (marking_deque_.IsFull()) return;
2054 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2055 if (marking_deque_.IsFull()) return;
2057 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2058 if (marking_deque_.IsFull()) return;
2060 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2061 heap()->property_cell_space());
2062 if (marking_deque_.IsFull()) return;
2064 LargeObjectIterator lo_it(heap()->lo_space());
2065 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
2066 if (marking_deque_.IsFull()) return;
2068 marking_deque_.ClearOverflowed();
2072 // Mark all objects reachable (transitively) from objects on the marking
2073 // stack. Before: the marking stack contains zero or more heap object
2074 // pointers. After: the marking stack is empty and there are no overflowed
2075 // objects in the heap.
2076 void MarkCompactCollector::ProcessMarkingDeque() {
2077 EmptyMarkingDeque();
2078 while (marking_deque_.overflowed()) {
2079 RefillMarkingDeque();
2080 EmptyMarkingDeque();
2085 // Mark all objects reachable (transitively) from objects on the marking
2086 // stack including references only considered in the atomic marking pause.
2087 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2088 bool work_to_do = true;
2089 DCHECK(marking_deque_.IsEmpty());
2090 while (work_to_do) {
2091 isolate()->global_handles()->IterateObjectGroups(
2092 visitor, &IsUnmarkedHeapObjectWithHeap);
2093 MarkImplicitRefGroups();
2094 ProcessWeakCollections();
2095 work_to_do = !marking_deque_.IsEmpty();
2096 ProcessMarkingDeque();
2101 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2102 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2103 !it.done(); it.Advance()) {
2104 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2107 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2108 Code* code = it.frame()->LookupCode();
2109 if (!code->CanDeoptAt(it.frame()->pc())) {
2110 code->CodeIterateBody(visitor);
2112 ProcessMarkingDeque();
2119 void MarkCompactCollector::MarkLiveObjects() {
2120 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2121 double start_time = 0.0;
2122 if (FLAG_print_cumulative_gc_stat) {
2123 start_time = base::OS::TimeCurrentMillis();
2125 // The recursive GC marker detects when it is nearing stack overflow,
2126 // and switches to a different marking system. JS interrupts interfere
2127 // with the C stack limit check.
2128 PostponeInterruptsScope postpone(isolate());
2130 bool incremental_marking_overflowed = false;
2131 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2132 if (was_marked_incrementally_) {
2133 // Finalize the incremental marking and check whether we had an overflow.
2134 // Both markers use grey color to mark overflowed objects so
2135 // non-incremental marker can deal with them as if overflow
2136 // occurred during normal marking.
2137 // But incremental marker uses a separate marking deque
2138 // so we have to explicitly copy its overflow state.
2139 incremental_marking->Finalize();
2140 incremental_marking_overflowed =
2141 incremental_marking->marking_deque()->overflowed();
2142 incremental_marking->marking_deque()->ClearOverflowed();
2144 // Abort any pending incremental activities e.g. incremental sweeping.
2145 incremental_marking->Abort();
2149 DCHECK(state_ == PREPARE_GC);
2150 state_ = MARK_LIVE_OBJECTS;
2152 // The to space contains live objects, a page in from space is used as a
2153 // marking stack.
2154 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2155 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2156 if (FLAG_force_marking_deque_overflows) {
2157 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2159 marking_deque_.Initialize(marking_deque_start, marking_deque_end);
2160 DCHECK(!marking_deque_.overflowed());
2162 if (incremental_marking_overflowed) {
2163 // There are overflowed objects left in the heap after incremental marking.
2164 marking_deque_.SetOverflowed();
2167 PrepareForCodeFlushing();
2169 if (was_marked_incrementally_) {
2170 // There is no write barrier on cells so we have to scan them now at the end
2171 // of the incremental marking.
2173 HeapObjectIterator cell_iterator(heap()->cell_space());
2175 while ((cell = cell_iterator.Next()) != NULL) {
2176 DCHECK(cell->IsCell());
2177 if (IsMarked(cell)) {
2178 int offset = Cell::kValueOffset;
2179 MarkCompactMarkingVisitor::VisitPointer(
2180 heap(), reinterpret_cast<Object**>(cell->address() + offset));
2185 HeapObjectIterator js_global_property_cell_iterator(
2186 heap()->property_cell_space());
2188 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2189 DCHECK(cell->IsPropertyCell());
2190 if (IsMarked(cell)) {
2191 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2197 RootMarkingVisitor root_visitor(heap());
2198 MarkRoots(&root_visitor);
2200 ProcessTopOptimizedFrame(&root_visitor);
2202 // The objects reachable from the roots are marked, yet unreachable
2203 // objects are unmarked. Mark objects reachable due to host
2204 // application specific logic or through Harmony weak maps.
2205 ProcessEphemeralMarking(&root_visitor);
2207 // The objects reachable from the roots, weak maps or object groups
2208 // are marked, yet unreachable objects are unmarked. Mark objects
2209 // reachable only from weak global handles.
2211 // First we identify nonlive weak handles and mark them as pending
2213 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2214 &IsUnmarkedHeapObject);
2215 // Then we mark the objects and process the transitive closure.
2216 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2217 while (marking_deque_.overflowed()) {
2218 RefillMarkingDeque();
2219 EmptyMarkingDeque();
2222 // Repeat host application specific and Harmony weak maps marking to
2223 // mark unmarked objects reachable from the weak roots.
2224 ProcessEphemeralMarking(&root_visitor);
2228 if (FLAG_print_cumulative_gc_stat) {
2229 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2234 void MarkCompactCollector::AfterMarking() {
2235 // Object literal map caches reference strings (cache keys) and maps
2236 // (cache values). At this point, maps that are still useful have already
2237 // been marked. Mark the keys for the alive values before we process the
2238 // string table.
2239 ProcessMapCaches();
2241 // Prune the string table removing all strings only pointed to by the
2242 // string table. Cannot use string_table() here because the string
2243 // table is marked.
2244 StringTable* string_table = heap()->string_table();
2245 InternalizedStringTableCleaner internalized_visitor(heap());
2246 string_table->IterateElements(&internalized_visitor);
2247 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2249 ExternalStringTableCleaner external_visitor(heap());
2250 heap()->external_string_table_.Iterate(&external_visitor);
2251 heap()->external_string_table_.CleanUp();
2253 // Process the weak references.
2254 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2255 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2257 // Remove object groups after marking phase.
2258 heap()->isolate()->global_handles()->RemoveObjectGroups();
2259 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2261 // Flush code from collected candidates.
2262 if (is_code_flushing_enabled()) {
2263 code_flusher_->ProcessCandidates();
2264 // If incremental marker does not support code flushing, we need to
2265 // disable it before incremental marking steps for next cycle.
2266 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2267 EnableCodeFlushing(false);
2271 if (FLAG_track_gc_object_stats) {
2272 heap()->CheckpointObjectStats();
2277 void MarkCompactCollector::ProcessMapCaches() {
2278 Object* raw_context = heap()->native_contexts_list();
2279 while (raw_context != heap()->undefined_value()) {
2280 Context* context = reinterpret_cast<Context*>(raw_context);
2281 if (IsMarked(context)) {
2282 HeapObject* raw_map_cache =
2283 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2284 // A map cache may be reachable from the stack. In this case
2285 // it's already transitively marked and it's too late to clean it up.
2287 if (!IsMarked(raw_map_cache) &&
2288 raw_map_cache != heap()->undefined_value()) {
2289 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2290 int existing_elements = map_cache->NumberOfElements();
2291 int used_elements = 0;
2292 for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
2293 i += MapCache::kEntrySize) {
2294 Object* raw_key = map_cache->get(i);
2295 if (raw_key == heap()->undefined_value() ||
2296 raw_key == heap()->the_hole_value())
2298 STATIC_ASSERT(MapCache::kEntrySize == 2);
2299 Object* raw_map = map_cache->get(i + 1);
2300 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2303 // Delete useless entries with unmarked maps.
2304 DCHECK(raw_map->IsMap());
2305 map_cache->set_the_hole(i);
2306 map_cache->set_the_hole(i + 1);
2309 if (used_elements == 0) {
2310 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2312 // Note: we don't actually shrink the cache here to avoid
2313 // extra complexity during GC. We rely on subsequent cache
2314 // usages (EnsureCapacity) to do this.
2315 map_cache->ElementsRemoved(existing_elements - used_elements);
2316 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2317 MarkObject(map_cache, map_cache_markbit);
2321 // Move to next element in the list.
2322 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2324 ProcessMarkingDeque();
2328 void MarkCompactCollector::ClearNonLiveReferences() {
2329 // Iterate over the map space, setting map transitions that go from
2330 // a marked map to an unmarked map to null transitions. This action
2331 // is carried out only on maps of JSObjects and related subtypes.
2332 HeapObjectIterator map_iterator(heap()->map_space());
2333 for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2334 obj = map_iterator.Next()) {
2335 Map* map = Map::cast(obj);
2337 if (!map->CanTransition()) continue;
2339 MarkBit map_mark = Marking::MarkBitFrom(map);
2340 ClearNonLivePrototypeTransitions(map);
2341 ClearNonLiveMapTransitions(map, map_mark);
2343 if (map_mark.Get()) {
2344 ClearNonLiveDependentCode(map->dependent_code());
2346 ClearDependentCode(map->dependent_code());
2347 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2351 // Iterate over property cell space, removing dependent code that is not
2352 // otherwise kept alive by strong references.
2353 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2354 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
2355 cell = cell_iterator.Next()) {
2356 if (IsMarked(cell)) {
2357 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2361 // Iterate over allocation sites, removing dependent code that is not
2362 // otherwise kept alive by strong references.
2363 Object* undefined = heap()->undefined_value();
2364 for (Object* site = heap()->allocation_sites_list(); site != undefined;
2365 site = AllocationSite::cast(site)->weak_next()) {
2366 if (IsMarked(site)) {
2367 ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2371 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2372 WeakHashTable* table =
2373 WeakHashTable::cast(heap_->weak_object_to_code_table());
2374 uint32_t capacity = table->Capacity();
2375 for (uint32_t i = 0; i < capacity; i++) {
2376 uint32_t key_index = table->EntryToIndex(i);
2377 Object* key = table->get(key_index);
2378 if (!table->IsKey(key)) continue;
2379 uint32_t value_index = table->EntryToValueIndex(i);
2380 Object* value = table->get(value_index);
2381 if (key->IsCell() && !IsMarked(key)) {
2382 Cell* cell = Cell::cast(key);
2383 Object* object = cell->value();
2384 if (IsMarked(object)) {
2385 MarkBit mark = Marking::MarkBitFrom(cell);
2386 SetMark(cell, mark);
2387 Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2388 RecordSlot(value_slot, value_slot, *value_slot);
2391 if (IsMarked(key)) {
2392 if (!IsMarked(value)) {
2393 HeapObject* obj = HeapObject::cast(value);
2394 MarkBit mark = Marking::MarkBitFrom(obj);
2397 ClearNonLiveDependentCode(DependentCode::cast(value));
2399 ClearDependentCode(DependentCode::cast(value));
2400 table->set(key_index, heap_->the_hole_value());
2401 table->set(value_index, heap_->the_hole_value());
2402 table->ElementRemoved();
2409 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2410 int number_of_transitions = map->NumberOfProtoTransitions();
2411 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2413 int new_number_of_transitions = 0;
2414 const int header = Map::kProtoTransitionHeaderSize;
2415 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2416 const int map_offset = header + Map::kProtoTransitionMapOffset;
2417 const int step = Map::kProtoTransitionElementsPerEntry;
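// Layout sketch (derived from the constants above): each entry occupies
// `step` slots after the header, so entry i keeps its prototype at
// proto_offset + i * step and its cached map at map_offset + i * step.
// Live entries are compacted towards the front in the loop below, and freed
// tail slots are overwritten with undefined afterwards.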
2418 for (int i = 0; i < number_of_transitions; i++) {
2419 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2420 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2421 if (IsMarked(prototype) && IsMarked(cached_map)) {
2422 DCHECK(!prototype->IsUndefined());
2423 int proto_index = proto_offset + new_number_of_transitions * step;
2424 int map_index = map_offset + new_number_of_transitions * step;
2425 if (new_number_of_transitions != i) {
2426 prototype_transitions->set(proto_index, prototype,
2427 UPDATE_WRITE_BARRIER);
2428 prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
2430 Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2431 RecordSlot(slot, slot, prototype);
2432 new_number_of_transitions++;
2436 if (new_number_of_transitions != number_of_transitions) {
2437 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2440 // Fill slots that became free with undefined value.
2441 for (int i = new_number_of_transitions * step;
2442 i < number_of_transitions * step; i++) {
2443 prototype_transitions->set_undefined(header + i);
2448 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2450 Object* potential_parent = map->GetBackPointer();
2451 if (!potential_parent->IsMap()) return;
2452 Map* parent = Map::cast(potential_parent);
2454 // Follow the back pointer to check whether this is a transition from a live
2455 // map to a dead path; if so, clear the parent map's transitions.
2456 bool current_is_alive = map_mark.Get();
2457 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2458 if (!current_is_alive && parent_is_alive) {
2459 ClearMapTransitions(parent);
2464 // Clear a possible back pointer in case the transition leads to a dead map.
2465 // Return true in case a back pointer has been cleared and false otherwise.
2466 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2467 if (Marking::MarkBitFrom(target).Get()) return false;
2468 target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2473 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2474 // If there are no transitions to be cleared, return.
2475 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2476 // properly cleared.
2477 if (!map->HasTransitionArray()) return;
2479 TransitionArray* t = map->transitions();
2481 int transition_index = 0;
2483 DescriptorArray* descriptors = map->instance_descriptors();
2484 bool descriptors_owner_died = false;
2486 // Compact all live descriptors to the left.
2487 for (int i = 0; i < t->number_of_transitions(); ++i) {
2488 Map* target = t->GetTarget(i);
2489 if (ClearMapBackPointer(target)) {
2490 if (target->instance_descriptors() == descriptors) {
2491 descriptors_owner_died = true;
2494 if (i != transition_index) {
2495 Name* key = t->GetKey(i);
2496 t->SetKey(transition_index, key);
2497 Object** key_slot = t->GetKeySlot(transition_index);
2498 RecordSlot(key_slot, key_slot, key);
2499 // Target slots do not need to be recorded since maps are not compacted.
2500 t->SetTarget(transition_index, t->GetTarget(i));
2506 // If there are no transitions to be cleared, return.
2507 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2508 // properly cleared.
2509 if (transition_index == t->number_of_transitions()) return;
2511 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2513 if (descriptors_owner_died) {
2514 if (number_of_own_descriptors > 0) {
2515 TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2516 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2517 map->set_owns_descriptors(true);
2519 DCHECK(descriptors == heap_->empty_descriptor_array());
2523 // Note that we never eliminate a transition array, though we might right-trim
2524 // such that number_of_transitions() == 0. If this assumption changes,
2525 // TransitionArray::Insert() will need to deal with the case that a transition
2526 // array disappeared during GC.
2527 int trim = t->number_of_transitions_storage() - transition_index;
2529 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2530 t, t->IsSimpleTransition() ? trim
2531 : trim * TransitionArray::kTransitionSize);
2532 t->SetNumberOfTransitions(transition_index);
2534 DCHECK(map->HasTransitionArray());
2538 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2539 DescriptorArray* descriptors,
2540 int number_of_own_descriptors) {
2541 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2542 int to_trim = number_of_descriptors - number_of_own_descriptors;
2543 if (to_trim == 0) return;
2545 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2546 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2547 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2549 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2550 descriptors->Sort();
2554 void MarkCompactCollector::TrimEnumCache(Map* map,
2555 DescriptorArray* descriptors) {
2556 int live_enum = map->EnumLength();
2557 if (live_enum == kInvalidEnumCacheSentinel) {
2558 live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2560 if (live_enum == 0) return descriptors->ClearEnumCache();
2562 FixedArray* enum_cache = descriptors->GetEnumCache();
2564 int to_trim = enum_cache->length() - live_enum;
2565 if (to_trim <= 0) return;
2566 heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
2569 if (!descriptors->HasEnumIndicesCache()) return;
2570 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2571 heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
2575 void MarkCompactCollector::ClearDependentICList(Object* head) {
2576 Object* current = head;
2577 Object* undefined = heap()->undefined_value();
2578 while (current != undefined) {
2579 Code* code = Code::cast(current);
2580 if (IsMarked(code)) {
2581 DCHECK(code->is_weak_stub());
2582 IC::InvalidateMaps(code);
2584 current = code->next_code_link();
2585 code->set_next_code_link(undefined);
2590 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
2591 DisallowHeapAllocation no_allocation;
2592 DependentCode::GroupStartIndexes starts(entries);
2593 int number_of_entries = starts.number_of_entries();
2594 if (number_of_entries == 0) return;
2595 int g = DependentCode::kWeakICGroup;
2596 if (starts.at(g) != starts.at(g + 1)) {
2597 int i = starts.at(g);
2598 DCHECK(i + 1 == starts.at(g + 1));
2599 Object* head = entries->object_at(i);
2600 ClearDependentICList(head);
2602 g = DependentCode::kWeakCodeGroup;
2603 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2604 // If the entry is compilation info then the map must be alive,
2605 // and ClearDependentCode shouldn't be called.
2606 DCHECK(entries->is_code_at(i));
2607 Code* code = entries->code_at(i);
2608 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2609 DependentCode::SetMarkedForDeoptimization(
2610 code, static_cast<DependentCode::DependencyGroup>(g));
2611 code->InvalidateEmbeddedObjects();
2612 have_code_to_deoptimize_ = true;
2615 for (int i = 0; i < number_of_entries; i++) {
2616 entries->clear_at(i);
2621 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2622 DependentCode* entries, int group, int start, int end, int new_start) {
2624 if (group == DependentCode::kWeakICGroup) {
2625 // Dependent weak IC stubs form a linked list and only the head is stored
2626 // in the dependent code array.
2628 DCHECK(start + 1 == end);
2629 Object* old_head = entries->object_at(start);
2630 MarkCompactWeakObjectRetainer retainer;
2631 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
2632 entries->set_object_at(new_start, head);
2633 Object** slot = entries->slot_at(new_start);
2634 RecordSlot(slot, slot, head);
2635 // We do not compact this group even if the head is undefined;
2636 // more dependent ICs are likely to be added later.
2640 for (int i = start; i < end; i++) {
2641 Object* obj = entries->object_at(i);
2642 DCHECK(obj->IsCode() || IsMarked(obj));
2643 if (IsMarked(obj) &&
2644 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2645 if (new_start + survived != i) {
2646 entries->set_object_at(new_start + survived, obj);
2648 Object** slot = entries->slot_at(new_start + survived);
2649 RecordSlot(slot, slot, obj);
2654 entries->set_number_of_entries(
2655 static_cast<DependentCode::DependencyGroup>(group), survived);
2660 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2661 DisallowHeapAllocation no_allocation;
2662 DependentCode::GroupStartIndexes starts(entries);
2663 int number_of_entries = starts.number_of_entries();
2664 if (number_of_entries == 0) return;
2665 int new_number_of_entries = 0;
2666 // Go through all groups, remove dead codes and compact.
2667 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2668 int survived = ClearNonLiveDependentCodeInGroup(
2669 entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2670 new_number_of_entries += survived;
2672 for (int i = new_number_of_entries; i < number_of_entries; i++) {
2673 entries->clear_at(i);
2678 void MarkCompactCollector::ProcessWeakCollections() {
2679 GCTracer::Scope gc_scope(heap()->tracer(),
2680 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2681 Object* weak_collection_obj = heap()->encountered_weak_collections();
2682 while (weak_collection_obj != Smi::FromInt(0)) {
2683 JSWeakCollection* weak_collection =
2684 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2685 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2686 if (weak_collection->table()->IsHashTable()) {
2687 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2688 Object** anchor = reinterpret_cast<Object**>(table->address());
2689 for (int i = 0; i < table->Capacity(); i++) {
2690 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2692 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2693 RecordSlot(anchor, key_slot, *key_slot);
2694 Object** value_slot =
2695 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2696 MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2701 weak_collection_obj = weak_collection->next();
2706 void MarkCompactCollector::ClearWeakCollections() {
2707 GCTracer::Scope gc_scope(heap()->tracer(),
2708 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2709 Object* weak_collection_obj = heap()->encountered_weak_collections();
2710 while (weak_collection_obj != Smi::FromInt(0)) {
2711 JSWeakCollection* weak_collection =
2712 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2713 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2714 if (weak_collection->table()->IsHashTable()) {
2715 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2716 for (int i = 0; i < table->Capacity(); i++) {
2717 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2718 if (!MarkCompactCollector::IsMarked(key)) {
2719 table->RemoveEntry(i);
2723 weak_collection_obj = weak_collection->next();
2724 weak_collection->set_next(heap()->undefined_value());
2726 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2730 void MarkCompactCollector::AbortWeakCollections() {
2731 GCTracer::Scope gc_scope(heap()->tracer(),
2732 GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
2733 Object* weak_collection_obj = heap()->encountered_weak_collections();
2734 while (weak_collection_obj != Smi::FromInt(0)) {
2735 JSWeakCollection* weak_collection =
2736 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2737 weak_collection_obj = weak_collection->next();
2738 weak_collection->set_next(heap()->undefined_value());
2740 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2744 void MarkCompactCollector::ProcessAndClearWeakCells() {
2745 HeapObject* undefined = heap()->undefined_value();
2746 Object* weak_cell_obj = heap()->encountered_weak_cells();
2747 while (weak_cell_obj != Smi::FromInt(0)) {
2748 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2749 // We do not insert cleared weak cells into the list, so the value
2750 // cannot be a Smi here.
2751 HeapObject* value = HeapObject::cast(weak_cell->value());
2752 if (!MarkCompactCollector::IsMarked(value)) {
2755 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2756 heap()->mark_compact_collector()->RecordSlot(slot, slot, value);
2758 weak_cell_obj = weak_cell->next();
2759 weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
2761 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2765 void MarkCompactCollector::AbortWeakCells() {
2766 Object* undefined = heap()->undefined_value();
2767 Object* weak_cell_obj = heap()->encountered_weak_cells();
2768 while (weak_cell_obj != Smi::FromInt(0)) {
2769 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2770 weak_cell_obj = weak_cell->next();
2771 weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
2773 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2777 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
2778 if (heap_->InNewSpace(value)) {
2779 heap_->store_buffer()->Mark(slot);
2780 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2781 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2782 reinterpret_cast<Object**>(slot),
2783 SlotsBuffer::IGNORE_OVERFLOW);
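// In short: slots that end up pointing into new space are remembered via the
// store buffer, while slots pointing into evacuation candidate pages are
// recorded in the migration slots buffer so they can be updated after
// evacuation.  All other slots need no bookkeeping.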
2788 // We scavenge new space simultaneously with sweeping. This is done in two
2789 // passes.
2791 // The first pass migrates all alive objects from one semispace to another or
2792 // promotes them to old space. The forwarding address is written directly into
2793 // the first word of the object without any encoding. If an object is dead we
2794 // write NULL as its forwarding address.
2796 // The second pass updates pointers to new space in all spaces. It is possible
2797 // to encounter pointers to dead new space objects during traversal of pointers
2798 // to new space. We should clear them to avoid encountering them during next
2799 // pointer iteration. This is an issue if the store buffer overflows and we
2800 // have to scan the entire old space, including dead objects, looking for
2801 // pointers to new space.
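// For example (illustrative, hypothetical addresses): after the first pass a
// surviving from-space object whose payload was moved to address A has A
// stored in its first word; the static UpdatePointer() helper further below
// reads it back with Memory::Address_at().  A dead object has NULL there, so
// its entry is simply not transferred to the rebuilt store buffer.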
2802 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2803 int size, AllocationSpace dest) {
2804 Address dst_addr = dst->address();
2805 Address src_addr = src->address();
2806 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2807 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2808 if (dest == OLD_POINTER_SPACE) {
2809 Address src_slot = src_addr;
2810 Address dst_slot = dst_addr;
2811 DCHECK(IsAligned(size, kPointerSize));
2813 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2814 Object* value = Memory::Object_at(src_slot);
2816 Memory::Object_at(dst_slot) = value;
2818 if (!src->MayContainRawValues()) {
2819 RecordMigratedSlot(value, dst_slot);
2822 src_slot += kPointerSize;
2823 dst_slot += kPointerSize;
2826 if (compacting_ && dst->IsJSFunction()) {
2827 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2828 Address code_entry = Memory::Address_at(code_entry_slot);
2830 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2831 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2832 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2833 SlotsBuffer::IGNORE_OVERFLOW);
2835 } else if (dst->IsConstantPoolArray()) {
2836 // We special case ConstantPoolArrays since they could contain integer
2837 // value entries which look like tagged pointers.
2838 // TODO(mstarzinger): restructure this code to avoid this special-casing.
2839 ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2840 ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2841 while (!code_iter.is_finished()) {
2842 Address code_entry_slot =
2843 dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2844 Address code_entry = Memory::Address_at(code_entry_slot);
2846 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2847 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2848 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2849 SlotsBuffer::IGNORE_OVERFLOW);
2852 ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2853 while (!heap_iter.is_finished()) {
2855 dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2856 Object* value = Memory::Object_at(heap_slot);
2857 RecordMigratedSlot(value, heap_slot);
2860 } else if (dest == CODE_SPACE) {
2861 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2862 heap()->MoveBlock(dst_addr, src_addr, size);
2863 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2864 SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
2865 SlotsBuffer::IGNORE_OVERFLOW);
2866 Code::cast(dst)->Relocate(dst_addr - src_addr);
2868 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2869 heap()->MoveBlock(dst_addr, src_addr, size);
2871 heap()->OnMoveEvent(dst, src, size);
2872 Memory::Address_at(src_addr) = dst_addr;
2876 // Visitor for updating pointers from live objects in old spaces to new space.
2877 // It does not expect to encounter pointers to dead objects.
2878 class PointersUpdatingVisitor : public ObjectVisitor {
2880 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2882 void VisitPointer(Object** p) { UpdatePointer(p); }
2884 void VisitPointers(Object** start, Object** end) {
2885 for (Object** p = start; p < end; p++) UpdatePointer(p);
2888 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2889 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2890 Object* target = rinfo->target_object();
2891 Object* old_target = target;
2892 VisitPointer(&target);
2893 // Avoid unnecessary changes that might unnecessarily flush the instruction
2894 // cache.
2895 if (target != old_target) {
2896 rinfo->set_target_object(target);
2900 void VisitCodeTarget(RelocInfo* rinfo) {
2901 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2902 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2903 Object* old_target = target;
2904 VisitPointer(&target);
2905 if (target != old_target) {
2906 rinfo->set_target_address(Code::cast(target)->instruction_start());
2910 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2911 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2912 Object* stub = rinfo->code_age_stub();
2913 DCHECK(stub != NULL);
2914 VisitPointer(&stub);
2915 if (stub != rinfo->code_age_stub()) {
2916 rinfo->set_code_age_stub(Code::cast(stub));
2920 void VisitDebugTarget(RelocInfo* rinfo) {
2921 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2922 rinfo->IsPatchedReturnSequence()) ||
2923 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2924 rinfo->IsPatchedDebugBreakSlotSequence()));
2925 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2926 VisitPointer(&target);
2927 rinfo->set_call_address(Code::cast(target)->instruction_start());
2930 static inline void UpdateSlot(Heap* heap, Object** slot) {
2931 Object* obj = *slot;
2933 if (!obj->IsHeapObject()) return;
2935 HeapObject* heap_obj = HeapObject::cast(obj);
2937 MapWord map_word = heap_obj->map_word();
2938 if (map_word.IsForwardingAddress()) {
2939 DCHECK(heap->InFromSpace(heap_obj) ||
2940 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2941 HeapObject* target = map_word.ToForwardingAddress();
2942 *slot = target;
2943 DCHECK(!heap->InFromSpace(target) &&
2944 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2949 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2955 static void UpdatePointer(HeapObject** address, HeapObject* object) {
2956 Address new_addr = Memory::Address_at(object->address());
2958 // The new space sweep will overwrite the map word of dead objects
2959 // with NULL. In this case we do not need to transfer this entry to
2960 // the store buffer which we are rebuilding.
2961 // We perform the pointer update with a no barrier compare-and-swap. The
2962 // compare and swap may fail in the case where the pointer update tries to
2963 // update garbage memory which was concurrently accessed by the sweeper.
2964 if (new_addr != NULL) {
2965 base::NoBarrier_CompareAndSwap(
2966 reinterpret_cast<base::AtomicWord*>(address),
2967 reinterpret_cast<base::AtomicWord>(object),
2968 reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
2973 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2975 MapWord map_word = HeapObject::cast(*p)->map_word();
2977 if (map_word.IsForwardingAddress()) {
2978 return String::cast(map_word.ToForwardingAddress());
2981 return String::cast(*p);
2985 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2987 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2989 OldSpace* target_space = heap()->TargetSpace(object);
2991 DCHECK(target_space == heap()->old_pointer_space() ||
2992 target_space == heap()->old_data_space());
2994 AllocationResult allocation = target_space->AllocateRaw(object_size);
2995 if (allocation.To(&target)) {
2996 MigrateObject(target, object, object_size, target_space->identity());
2997 heap()->IncrementPromotedObjectsSize(object_size);
3005 void MarkCompactCollector::EvacuateNewSpace() {
3006 // There are soft limits in the allocation code, designed to trigger a mark-
3007 // sweep collection by failing allocations. But since we are already in
3008 // a mark-sweep allocation, there is no sense in trying to trigger one.
3009 AlwaysAllocateScope scope(isolate());
3011 NewSpace* new_space = heap()->new_space();
3013 // Store allocation range before flipping semispaces.
3014 Address from_bottom = new_space->bottom();
3015 Address from_top = new_space->top();
3017 // Flip the semispaces. After flipping, to space is empty, from space has
3018 // live objects.
3019 new_space->Flip();
3020 new_space->ResetAllocationInfo();
3022 int survivors_size = 0;
3024 // First pass: traverse all objects in inactive semispace, remove marks,
3025 // migrate live objects and write forwarding addresses. This stage puts
3026 // new entries in the store buffer and may cause some pages to be marked
3027 // scan-on-scavenge.
3028 NewSpacePageIterator it(from_bottom, from_top);
3029 while (it.has_next()) {
3030 NewSpacePage* p = it.next();
3031 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3034 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3035 new_space->set_age_mark(new_space->top());
3039 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3040 AlwaysAllocateScope always_allocate(isolate());
3041 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3042 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3047 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3048 Address cell_base = it.CurrentCellBase();
3049 MarkBit::CellType* cell = it.CurrentCell();
3051 if (*cell == 0) continue;
3053 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3054 for (int i = 0; i < live_objects; i++) {
3055 Address object_addr = cell_base + offsets[i] * kPointerSize;
3056 HeapObject* object = HeapObject::FromAddress(object_addr);
3057 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3059 int size = object->Size();
3061 HeapObject* target_object;
3062 AllocationResult allocation = space->AllocateRaw(size);
3063 if (!allocation.To(&target_object)) {
3064 // If allocation failed, use emergency memory and re-try allocation.
3065 CHECK(space->HasEmergencyMemory());
3066 space->UseEmergencyMemory();
3067 allocation = space->AllocateRaw(size);
3069 if (!allocation.To(&target_object)) {
3070 // OS refused to give us memory.
3071 V8::FatalProcessOutOfMemory("Evacuation");
3075 MigrateObject(target_object, object, size, space->identity());
3076 DCHECK(object->map_word().IsForwardingAddress());
3079 // Clear marking bits for current cell.
3082 p->ResetLiveBytes();
3086 void MarkCompactCollector::EvacuatePages() {
3087 int npages = evacuation_candidates_.length();
3088 for (int i = 0; i < npages; i++) {
3089 Page* p = evacuation_candidates_[i];
3090 DCHECK(p->IsEvacuationCandidate() ||
3091 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3092 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3093 MemoryChunk::SWEEPING_DONE);
3094 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3095 // Allocate emergency memory for the case when compaction fails due to out
3096 // of memory.
3097 if (!space->HasEmergencyMemory()) {
3098 space->CreateEmergencyMemory();
3100 if (p->IsEvacuationCandidate()) {
3101 // During compaction we might have to request a new page. Check that we
3102 // have an emergency page and the space still has room for that.
3103 if (space->HasEmergencyMemory() && space->CanExpand()) {
3104 EvacuateLiveObjectsFromPage(p);
3106 // Without room for expansion evacuation is not guaranteed to succeed.
3107 // Pessimistically abandon unevacuated pages.
3108 for (int j = i; j < npages; j++) {
3109 Page* page = evacuation_candidates_[j];
3110 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3111 page->ClearEvacuationCandidate();
3112 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3119 // Release emergency memory.
3120 PagedSpaces spaces(heap());
3121 for (PagedSpace* space = spaces.next(); space != NULL;
3122 space = spaces.next()) {
3123 if (space->HasEmergencyMemory()) {
3124 space->FreeEmergencyMemory();
3131 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3133 virtual Object* RetainAs(Object* object) {
3134 if (object->IsHeapObject()) {
3135 HeapObject* heap_object = HeapObject::cast(object);
3136 MapWord map_word = heap_object->map_word();
3137 if (map_word.IsForwardingAddress()) {
3138 return map_word.ToForwardingAddress();
3146 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3147 SlotsBuffer::SlotType slot_type, Address addr) {
3148 switch (slot_type) {
3149 case SlotsBuffer::CODE_TARGET_SLOT: {
3150 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3151 rinfo.Visit(isolate, v);
3154 case SlotsBuffer::CODE_ENTRY_SLOT: {
3155 v->VisitCodeEntry(addr);
3158 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3159 HeapObject* obj = HeapObject::FromAddress(addr);
3160 Code::cast(obj)->CodeIterateBody(v);
3163 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3164 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3165 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3168 case SlotsBuffer::JS_RETURN_SLOT: {
3169 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3170 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3173 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3174 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3175 rinfo.Visit(isolate, v);
3185 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3188 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3191 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3194 template <MarkCompactCollector::SweepingParallelism mode>
3195 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3197 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3198 DCHECK(free_list == NULL);
3199 return space->Free(start, size);
3201 // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3202 return size - free_list->Free(start, size);
3207 // Sweeps a page. After sweeping the page can be iterated.
3208 // Slots in live objects pointing into evacuation candidates are updated
3209 // if requested.
3210 // Returns the size of the biggest continuous freed memory chunk in bytes.
3211 template <SweepingMode sweeping_mode,
3212 MarkCompactCollector::SweepingParallelism parallelism,
3213 SkipListRebuildingMode skip_list_mode,
3214 FreeSpaceTreatmentMode free_space_mode>
3215 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3217 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3218 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3219 space->identity() == CODE_SPACE);
3220 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3221 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3222 sweeping_mode == SWEEP_ONLY);
3224 Address free_start = p->area_start();
3225 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3228 SkipList* skip_list = p->skip_list();
3229 int curr_region = -1;
3230 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3234 intptr_t freed_bytes = 0;
3235 intptr_t max_freed_bytes = 0;
3237 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3238 Address cell_base = it.CurrentCellBase();
3239 MarkBit::CellType* cell = it.CurrentCell();
3240 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3242 for (; live_objects != 0; live_objects--) {
3243 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3244 if (free_end != free_start) {
3245 int size = static_cast<int>(free_end - free_start);
3246 if (free_space_mode == ZAP_FREE_SPACE) {
3247 memset(free_start, 0xcc, size);
3249 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3250 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3251 #ifdef ENABLE_GDB_JIT_INTERFACE
3252 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3253 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3257 HeapObject* live_object = HeapObject::FromAddress(free_end);
3258 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3259 Map* map = live_object->map();
3260 int size = live_object->SizeFromMap(map);
3261 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3262 live_object->IterateBody(map->instance_type(), size, v);
3264 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3265 int new_region_start = SkipList::RegionNumber(free_end);
3266 int new_region_end =
3267 SkipList::RegionNumber(free_end + size - kPointerSize);
3268 if (new_region_start != curr_region || new_region_end != curr_region) {
3269 skip_list->AddObject(free_end, size);
3270 curr_region = new_region_end;
3273 free_start = free_end + size;
3275 // Clear marking bits for current cell.
3278 if (free_start != p->area_end()) {
3279 int size = static_cast<int>(p->area_end() - free_start);
3280 if (free_space_mode == ZAP_FREE_SPACE) {
3281 memset(free_start, 0xcc, size);
3283 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3284 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3285 #ifdef ENABLE_GDB_JIT_INTERFACE
3286 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3287 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3291 p->ResetLiveBytes();
3293 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3294 // When concurrent sweeping is active, the page will be marked after
3295 // sweeping by the main thread.
3296 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3300 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3304 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3305 Page* p = Page::FromAddress(code->address());
3307 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3311 Address code_start = code->address();
3312 Address code_end = code_start + code->Size();
3314 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3315 uint32_t end_index =
3316 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3318 Bitmap* b = p->markbits();
3320 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3321 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3323 MarkBit::CellType* start_cell = start_mark_bit.cell();
3324 MarkBit::CellType* end_cell = end_mark_bit.cell();
3327 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3328 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
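// Worked example (illustrative, low bits shown): if start_mark_bit.mask() is
// 0b00001000 then start_mask is 0b11111000 (the start bit and everything
// above it), and if end_mark_bit.mask() is 0b00100000 then end_mask is
// 0b00111111 (the end bit and everything below it).  Their intersection
// covers exactly the mark bits spanned by the code object when it fits in a
// single cell.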
3330 if (start_cell == end_cell) {
3331 *start_cell |= start_mask & end_mask;
3333 *start_cell |= start_mask;
3334 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3337 *end_cell |= end_mask;
3340 for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
3349 static bool IsOnInvalidatedCodeObject(Address addr) {
3350 // We did not record any slots in large objects thus
3351 // we can safely go to the page from the slot address.
3352 Page* p = Page::FromAddress(addr);
3354 // First check owner's identity because old pointer and old data spaces
3355 // are swept lazily and might still have non-zero mark-bits on some
3356 // pages.
3357 if (p->owner()->identity() != CODE_SPACE) return false;
3359 // In code space only bits on evacuation candidates (but we don't record
3360 // any slots on them) and under invalidated code objects are non-zero.
3362 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3364 return mark_bit.Get();
3368 void MarkCompactCollector::InvalidateCode(Code* code) {
3369 if (heap_->incremental_marking()->IsCompacting() &&
3370 !ShouldSkipEvacuationSlotRecording(code)) {
3371 DCHECK(compacting_);
3373 // If the object is white then no slots were recorded on it yet.
3374 MarkBit mark_bit = Marking::MarkBitFrom(code);
3375 if (Marking::IsWhite(mark_bit)) return;
3377 invalidated_code_.Add(code);
3382 // Return true if the given code is deoptimized or will be deoptimized.
3383 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3384 return code->is_optimized_code() && code->marked_for_deoptimization();
3388 bool MarkCompactCollector::MarkInvalidatedCode() {
3389 bool code_marked = false;
3391 int length = invalidated_code_.length();
3392 for (int i = 0; i < length; i++) {
3393 Code* code = invalidated_code_[i];
3395 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3404 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3405 int length = invalidated_code_.length();
3406 for (int i = 0; i < length; i++) {
3407 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3412 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3413 int length = invalidated_code_.length();
3414 for (int i = 0; i < length; i++) {
3415 Code* code = invalidated_code_[i];
3417 code->Iterate(visitor);
3418 SetMarkBitsUnderInvalidatedCode(code, false);
3421 invalidated_code_.Rewind(0);
3425 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3426 Heap::RelocationLock relocation_lock(heap());
3428 bool code_slots_filtering_required;
3430 GCTracer::Scope gc_scope(heap()->tracer(),
3431 GCTracer::Scope::MC_SWEEP_NEWSPACE);
3432 code_slots_filtering_required = MarkInvalidatedCode();
3437 GCTracer::Scope gc_scope(heap()->tracer(),
3438 GCTracer::Scope::MC_EVACUATE_PAGES);
3442 // Second pass: find pointers to new space and update them.
3443 PointersUpdatingVisitor updating_visitor(heap());
3446 GCTracer::Scope gc_scope(heap()->tracer(),
3447 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3448 // Update pointers in to space.
3449 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3450 heap()->new_space()->top());
3451 for (HeapObject* object = to_it.Next(); object != NULL;
3452 object = to_it.Next()) {
3453 Map* map = object->map();
3454 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3460 GCTracer::Scope gc_scope(heap()->tracer(),
3461 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3463 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3467 GCTracer::Scope gc_scope(heap()->tracer(),
3468 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3469 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3470 &Heap::ScavengeStoreBufferCallback);
3471 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3476 GCTracer::Scope gc_scope(heap()->tracer(),
3477 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3478 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
3479 code_slots_filtering_required);
3480 if (FLAG_trace_fragmentation) {
3481 PrintF(" migration slots buffer: %d\n",
3482 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3485 if (compacting_ && was_marked_incrementally_) {
3486 // It's difficult to filter out slots recorded for large objects.
3487 LargeObjectIterator it(heap_->lo_space());
3488 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3489 // LargeObjectSpace is not swept yet thus we have to skip
3490 // dead objects explicitly.
3491 if (!IsMarked(obj)) continue;
3493 Page* p = Page::FromAddress(obj->address());
3494 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3495 obj->Iterate(&updating_visitor);
3496 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3502 int npages = evacuation_candidates_.length();
3504 GCTracer::Scope gc_scope(
3506 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3507 for (int i = 0; i < npages; i++) {
3508 Page* p = evacuation_candidates_[i];
3509 DCHECK(p->IsEvacuationCandidate() ||
3510 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3512 if (p->IsEvacuationCandidate()) {
3513 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
3514 code_slots_filtering_required);
3515 if (FLAG_trace_fragmentation) {
3516 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3517 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3520 // Important: the skip list should be cleared only after the roots have been
3521 // updated, because root iteration traverses the stack and might have to find
3522 // code objects from a not-yet-updated pc pointing into an evacuation candidate.
3523 SkipList* list = p->skip_list();
3524 if (list != NULL) list->Clear();
3526 if (FLAG_gc_verbose) {
3527 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3528 reinterpret_cast<intptr_t>(p));
3530 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3531 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
        switch (space->identity()) {
          case OLD_DATA_SPACE:
            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                       &updating_visitor);
            break;
          case OLD_POINTER_SPACE:
            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                       &updating_visitor);
            break;
          case CODE_SPACE:
            if (FLAG_zap_code_space) {
              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                    REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
                                                       &updating_visitor);
            } else {
              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                    REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                          &updating_visitor);
            }
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }
3563 GCTracer::Scope gc_scope(heap()->tracer(),
3564 GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3566 // Update pointers from cells.
3567 HeapObjectIterator cell_iterator(heap_->cell_space());
3568 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3569 cell = cell_iterator.Next()) {
3570 if (cell->IsCell()) {
        Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
      }
    }
3575 HeapObjectIterator js_global_property_cell_iterator(
3576 heap_->property_cell_space());
3577 for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3578 cell = js_global_property_cell_iterator.Next()) {
3579 if (cell->IsPropertyCell()) {
      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }
3584 heap_->string_table()->Iterate(&updating_visitor);
3585 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3586 if (heap_->weak_object_to_code_table()->IsHashTable()) {
3587 WeakHashTable* table =
3588 WeakHashTable::cast(heap_->weak_object_to_code_table());
3589 table->Iterate(&updating_visitor);
    table->Rehash(heap_->isolate()->factory()->undefined_value());
  }
3593 // Update pointers from external string table.
3594 heap_->UpdateReferencesInExternalStringTable(
3595 &UpdateReferenceInExternalStringTableEntry);
3597 EvacuationWeakObjectRetainer evacuation_object_retainer;
3598 heap()->ProcessWeakReferences(&evacuation_object_retainer);
  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
3602 ProcessInvalidatedCode(&updating_visitor);
3604 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3606 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  DCHECK(migration_slots_buffer_ == NULL);
}
3611 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3612 int npages = evacuation_candidates_.length();
3613 for (int i = 0; i < npages; i++) {
3614 Page* p = evacuation_candidates_[i];
3615 if (!p->IsEvacuationCandidate()) continue;
3617 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    p->InsertAfter(space->LastPage());
  }
}
3623 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3624 int npages = evacuation_candidates_.length();
3625 for (int i = 0; i < npages; i++) {
3626 Page* p = evacuation_candidates_[i];
3627 if (!p->IsEvacuationCandidate()) continue;
3628 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3629 space->Free(p->area_start(), p->area_size());
3630 p->set_scan_on_scavenge(false);
3631 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3632 p->ResetLiveBytes();
    space->ReleasePage(p);
  }
3635 evacuation_candidates_.Rewind(0);
3636 compacting_ = false;
  heap()->FreeQueuedChunks();
}
3641 static const int kStartTableEntriesPerLine = 5;
3642 static const int kStartTableLines = 171;
3643 static const int kStartTableInvalidLine = 127;
3644 static const int kStartTableUnusedEntry = 126;
3646 #define _ kStartTableUnusedEntry
3647 #define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte.  The first number on
// the line describes the number of live object starts for the line and the
// other numbers on the line describe the offsets (in words) of the object
// starts.
//
// Since objects are at least 2 words large we don't have entries for two
// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
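//
// Illustrative example (not part of the original table, added here only to
// show the encoding described above): for the mark-bit byte 0x25 (binary
// 00100101, bits set at positions 0, 2 and 5) the corresponding table line
// would hold a count of 3 followed by the word offsets 0, 2 and 5, with the
// remaining entry unused (kStartTableUnusedEntry).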
3657 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
// Takes a word of mark bits.  Returns the number of objects that start in the
// range.  Puts the word offsets of the object starts in the supplied array.
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  DCHECK((mark_bits & 0x180) != 0x180);
  DCHECK((mark_bits & 0x18000) != 0x18000);
  DCHECK((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      DCHECK(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
      DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }

  return objects;
}
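// A minimal usage sketch (illustrative only; the local names below are
// hypothetical and not defined in this file).  One 32-bit mark word covers
// 32 heap words, and since live objects are at least two words long, at most
// 16 starts can be reported per word:
//
//   uint32_t cell = ...;   // one cell of a page's mark bitmap
//   int starts[16];
//   int live = MarkWordToObjectStarts(cell, starts);
//   for (int i = 0; i < live; i++) {
//     // starts[i] is the offset, in words, of a live object's start within
//     // the 32-word region described by this cell.
//   }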
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                          int required_freed_bytes) {
  int max_freed = 0;
  int max_freed_overall = 0;
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    max_freed = SweepInParallel(p, space);
    DCHECK(max_freed >= 0);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;
    }
    max_freed_overall = Max(max_freed, max_freed_overall);
    if (p == space->end_of_unswept_pages()) break;
  }
  return max_freed_overall;
}
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
  int max_freed = 0;
  if (page->TryParallelSweeping()) {
    FreeList* free_list = space == heap()->old_pointer_space()
                              ? free_list_old_pointer_space_.get()
                              : free_list_old_data_space_.get();
    FreeList private_free_list(space);
    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
    free_list->Concatenate(&private_free_list);
  }
  return max_freed;
}
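// Design note: each parallel sweep fills a thread-private FreeList and only
// afterwards concatenates it into the space's shared free list, so sweeper
// threads do not contend on the shared list while sweeping a page.
//
// A hypothetical driver (illustrative only; this is not the collector's
// actual sweeper task) could look like this:
//
//   void SweepOldSpacesInBackground(MarkCompactCollector* collector,
//                                   Heap* heap) {
//     // Passing 0 for required_freed_bytes means "sweep all unswept pages".
//     collector->SweepInParallel(heap->old_data_space(), 0);
//     collector->SweepInParallel(heap->old_pointer_space(), 0);
//   }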
4069 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4070 space->ClearStats();
4072 // We defensively initialize end_of_unswept_pages_ here with the first page
4073 // of the pages list.
4074 space->set_end_of_unswept_pages(space->FirstPage());
4076 PageIterator it(space);
4078 int pages_swept = 0;
4079 bool unused_page_present = false;
4080 bool parallel_sweeping_active = false;
4082 while (it.has_next()) {
4083 Page* p = it.next();
4084 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearWasSwept();
4089 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4090 p->IsEvacuationCandidate()) {
4091 // Will be processed in EvacuateNewSpaceAndCandidates.
      DCHECK(evacuation_candidates_.length() > 0);
      continue;
    }
    // One unused page is kept, all others are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    switch (sweeper) {
      case CONCURRENT_SWEEPING:
        if (!parallel_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
                   reinterpret_cast<intptr_t>(p));
          }
          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
          pages_swept++;
          parallel_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
          space->IncreaseUnsweptFreeBytes(p);
        }
        space->set_end_of_unswept_pages(p);
        break;
      case SEQUENTIAL_SWEEPING: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                ZAP_FREE_SPACE>(space, NULL, p, NULL);
        } else if (space->identity() == CODE_SPACE) {
          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
        } else {
          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: { UNREACHABLE(); }
    }
  }
  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()), pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}
4164 void MarkCompactCollector::SweepSpaces() {
4165 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4166 double start_time = 0.0;
4167 if (FLAG_print_cumulative_gc_stat) {
    start_time = base::OS::TimeCurrentMillis();
  }
4172 state_ = SWEEP_SPACES;
4174 MoveEvacuationCandidatesToEndOfPagesList();
4176 // Noncompacting collections simply sweep the spaces to clear the mark
4177 // bits and free the nonlive blocks (for old and map spaces). We sweep
4178 // the map space last because freeing non-live maps overwrites them and
4179 // the other spaces rely on possibly non-live maps to get the sizes for
4180 // non-live objects.
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_OLDSPACE);
    {
      SequentialSweepingScope scope(this);
      SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
      SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
    }
    sweeping_in_progress_ = true;
    if (!FLAG_predictable) {
      StartSweeperThreads();
    }
  }
4194 RemoveDeadInvalidatedCode();
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_CODE);
    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
  }
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_CELL);
    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
  }
4209 EvacuateNewSpaceAndCandidates();
4211 // ClearNonLiveTransitions depends on precise sweeping of map space to
4212 // detect whether unmarked map became dead in this collection or in one
4213 // of the previous ones.
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_MAP);
    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
  }
4220 // Deallocate unmarked objects and clear marked bits for marked objects.
4221 heap_->lo_space()->FreeUnmarkedObjects();
4223 // Deallocate evacuated candidate pages.
4224 ReleaseEvacuationCandidates();
4225 CodeRange* code_range = heap()->isolate()->code_range();
4226 if (code_range != NULL && code_range->valid()) {
    code_range->ReserveEmergencyBlock();
  }
  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
                                     start_time);
  }
}
4237 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4238 PageIterator it(space);
4239 while (it.has_next()) {
4240 Page* p = it.next();
4241 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
      p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
    }
    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
  }
}
4250 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4251 ParallelSweepSpaceComplete(heap()->old_pointer_space());
  ParallelSweepSpaceComplete(heap()->old_data_space());
}
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
  if (isolate()->debug()->is_loaded() ||
      isolate()->debug()->has_break_points()) {
    enable = false;
  }

  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(isolate());
  } else {
    if (code_flusher_ == NULL) return;
    code_flusher_->EvictAllCandidates();
    delete code_flusher_;
    code_flusher_ = NULL;
  }

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
  }
}
4278 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4279 // Our profiling tools do not expect intersections between
4280 // code objects. We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
4283 if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}
4289 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4292 void MarkCompactCollector::Initialize() {
4293 MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}
4298 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address, SlotType type,
                        Address addr, AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  DCHECK(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}
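// Layout note: a typed slot occupies two consecutive entries in the buffer,
// the type (stored as an ObjectSlot whose numeric value is below
// NUMBER_OF_SLOT_TYPES, see IsTypedSlot above) followed by the address.  An
// illustrative, hypothetical recording of a code-target slot at address `pc`:
//
//   SlotsBuffer::AddTo(&slots_buffer_allocator_, page->slots_buffer_address(),
//                      SlotsBuffer::CODE_TARGET_SLOT, pc,
//                      SlotsBuffer::FAIL_ON_OVERFLOW);
//
// UpdateSlots() below consumes the pair by decoding the type and then
// advancing slot_idx to fetch the recorded address.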
4322 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4323 if (RelocInfo::IsCodeTarget(rmode)) {
4324 return SlotsBuffer::CODE_TARGET_SLOT;
4325 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4326 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4327 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4328 return SlotsBuffer::DEBUG_TARGET_SLOT;
4329 } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}
4337 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4338 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4339 RelocInfo::Mode rmode = rinfo->rmode();
4340 if (target_page->IsEvacuationCandidate() &&
4341 (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    bool success;
4344 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4345 // This doesn't need to be typed since it is just a normal heap pointer.
4346 Object** target_pointer =
4347 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4348 success = SlotsBuffer::AddTo(
4349 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4350 target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
4351 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4352 success = SlotsBuffer::AddTo(
4353 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4354 SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
          SlotsBuffer::FAIL_ON_OVERFLOW);
    } else {
4357 success = SlotsBuffer::AddTo(
4358 &slots_buffer_allocator_, target_page->slots_buffer_address(),
          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
    }
    if (!success) {
      EvictEvacuationCandidate(target_page);
    }
  }
}
4368 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4369 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4370 if (target_page->IsEvacuationCandidate() &&
4371 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4372 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4373 target_page->slots_buffer_address(),
4374 SlotsBuffer::CODE_ENTRY_SLOT, slot,
4375 SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}
4382 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4383 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4384 if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
4388 MarkBit mark_bit = Marking::MarkBitFrom(host);
4389 if (Marking::IsBlack(mark_bit)) {
4390 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}
4397 static inline SlotsBuffer::SlotType DecodeSlotType(
4398 SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}
4403 void SlotsBuffer::UpdateSlots(Heap* heap) {
4404 PointersUpdatingVisitor v(heap);
4406 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4407 ObjectSlot slot = slots_[slot_idx];
4408 if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
4412 DCHECK(slot_idx < idx_);
4413 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}
4420 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4421 PointersUpdatingVisitor v(heap);
4423 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4424 ObjectSlot slot = slots_[slot_idx];
4425 if (!IsTypedSlot(slot)) {
4426 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
4431 DCHECK(slot_idx < idx_);
4432 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4433 if (!IsOnInvalidatedCodeObject(pc)) {
4434 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}
4442 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}
void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}
4452 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4453 SlotsBuffer* buffer = *buffer_address;
4454 while (buffer != NULL) {
4455 SlotsBuffer* next_buffer = buffer->next();
4456 DeallocateBuffer(buffer);
4457 buffer = next_buffer;
  *buffer_address = NULL;
}
}
}  // namespace v8::internal