1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/code-stubs.h"
10 #include "src/compilation-cache.h"
11 #include "src/cpu-profiler.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/gdb-jit.h"
15 #include "src/global-handles.h"
16 #include "src/heap/incremental-marking.h"
17 #include "src/heap/mark-compact.h"
18 #include "src/heap/objects-visiting.h"
19 #include "src/heap/objects-visiting-inl.h"
20 #include "src/heap/spaces-inl.h"
21 #include "src/heap-profiler.h"
22 #include "src/ic/ic.h"
23 #include "src/ic/stub-cache.h"
namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
30 const char* Marking::kBlackBitPattern = "10";
31 const char* Marking::kGreyBitPattern = "11";
32 const char* Marking::kImpossibleBitPattern = "01";
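
// Each object carries a pair of mark bits that together form one of the
// patterns above: white ("00") objects are unmarked, black ("10") objects are
// marked, grey ("11") objects are marked and additionally flagged as still
// needing their children visited, and "01" cannot occur.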
35 // -------------------------------------------------------------------------
36 // MarkCompactCollector
38 MarkCompactCollector::MarkCompactCollector(Heap* heap)
43 reduce_memory_footprint_(false),
44 abort_incremental_marking_(false),
45 marking_parity_(ODD_MARKING_PARITY),
47 was_marked_incrementally_(false),
48 sweeping_in_progress_(false),
49 pending_sweeper_jobs_semaphore_(0),
51 migration_slots_buffer_(NULL),
53 marking_deque_memory_(NULL),
54 marking_deque_memory_committed_(false),
56 have_code_to_deoptimize_(false) {
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
64 void VisitPointers(Object** start, Object** end) {
65 for (Object** current = start; current < end; current++) {
66 if ((*current)->IsHeapObject()) {
67 HeapObject* object = HeapObject::cast(*current);
68 CHECK(heap_->mark_compact_collector()->IsMarked(object));
73 void VisitEmbeddedPointer(RelocInfo* rinfo) {
74 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
75 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }
81 void VisitCell(RelocInfo* rinfo) {
82 Code* code = rinfo->host();
83 DCHECK(rinfo->rmode() == RelocInfo::CELL);
84 if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};
94 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
95 VerifyMarkingVisitor visitor(heap);
97 Address next_object_must_be_here_or_later = bottom;
99 for (Address current = bottom; current < top; current += kPointerSize) {
    HeapObject* object = HeapObject::FromAddress(current);
101 if (MarkCompactCollector::IsMarked(object)) {
102 CHECK(current >= next_object_must_be_here_or_later);
103 object->Iterate(&visitor);
104 next_object_must_be_here_or_later = current + object->Size();
110 static void VerifyMarking(NewSpace* space) {
111 Address end = space->top();
112 NewSpacePageIterator it(space->bottom(), end);
113 // The bottom position is at the start of its page. Allows us to use
114 // page->area_start() as start of range on all pages.
115 CHECK_EQ(space->bottom(),
116 NewSpacePage::FromAddress(space->bottom())->area_start());
117 while (it.has_next()) {
118 NewSpacePage* page = it.next();
119 Address limit = it.has_next() ? page->area_end() : end;
120 CHECK(limit == end || !page->Contains(end));
121 VerifyMarking(space->heap(), page->area_start(), limit);
126 static void VerifyMarking(PagedSpace* space) {
127 PageIterator it(space);
129 while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
136 static void VerifyMarking(Heap* heap) {
137 VerifyMarking(heap->old_pointer_space());
138 VerifyMarking(heap->old_data_space());
139 VerifyMarking(heap->code_space());
140 VerifyMarking(heap->cell_space());
141 VerifyMarking(heap->property_cell_space());
142 VerifyMarking(heap->map_space());
143 VerifyMarking(heap->new_space());
145 VerifyMarkingVisitor visitor(heap);
147 LargeObjectIterator it(heap->lo_space());
148 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
149 if (MarkCompactCollector::IsMarked(obj)) {
150 obj->Iterate(&visitor);
154 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
158 class VerifyEvacuationVisitor : public ObjectVisitor {
160 void VisitPointers(Object** start, Object** end) {
161 for (Object** current = start; current < end; current++) {
162 if ((*current)->IsHeapObject()) {
163 HeapObject* object = HeapObject::cast(*current);
164 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
171 static void VerifyEvacuation(Page* page) {
172 VerifyEvacuationVisitor visitor;
173 HeapObjectIterator iterator(page, NULL);
174 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
175 heap_object = iterator.Next()) {
176 // We skip free space objects.
177 if (!heap_object->IsFiller()) {
178 heap_object->Iterate(&visitor);
184 static void VerifyEvacuation(NewSpace* space) {
185 NewSpacePageIterator it(space->bottom(), space->top());
186 VerifyEvacuationVisitor visitor;
188 while (it.has_next()) {
189 NewSpacePage* page = it.next();
190 Address current = page->area_start();
191 Address limit = it.has_next() ? page->area_end() : space->top();
192 CHECK(limit == space->top() || !page->Contains(space->top()));
193 while (current < limit) {
194 HeapObject* object = HeapObject::FromAddress(current);
195 object->Iterate(&visitor);
196 current += object->Size();
202 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
203 if (FLAG_use_allocation_folding &&
      (space == heap->old_pointer_space() || space == heap->old_data_space())) {
    return;
  }
207 PageIterator it(space);
209 while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
217 static void VerifyEvacuation(Heap* heap) {
218 VerifyEvacuation(heap, heap->old_pointer_space());
219 VerifyEvacuation(heap, heap->old_data_space());
220 VerifyEvacuation(heap, heap->code_space());
221 VerifyEvacuation(heap, heap->cell_space());
222 VerifyEvacuation(heap, heap->property_cell_space());
223 VerifyEvacuation(heap, heap->map_space());
224 VerifyEvacuation(heap->new_space());
226 VerifyEvacuationVisitor visitor;
227 heap->IterateStrongRoots(&visitor, VISIT_ALL);
229 #endif // VERIFY_HEAP
232 void MarkCompactCollector::SetUp() {
233 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
234 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
238 void MarkCompactCollector::TearDown() {
240 delete marking_deque_memory_;
244 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
245 DCHECK(!p->NeverEvacuate());
246 p->MarkEvacuationCandidate();
247 evacuation_candidates_.Add(p);
251 static void TraceFragmentation(PagedSpace* space) {
252 int number_of_pages = space->CountTotalPages();
253 intptr_t reserved = (number_of_pages * space->AreaSize());
254 intptr_t free = reserved - space->SizeOfObjects();
255 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
256 AllocationSpaceName(space->identity()), number_of_pages,
257 static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
261 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
263 DCHECK(evacuation_candidates_.length() == 0);
265 #ifdef ENABLE_GDB_JIT_INTERFACE
266 // If GDBJIT interface is active disable compaction.
267 if (FLAG_gdbjit) return false;
270 CollectEvacuationCandidates(heap()->old_pointer_space());
271 CollectEvacuationCandidates(heap()->old_data_space());
273 if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
274 FLAG_incremental_code_compaction)) {
275 CollectEvacuationCandidates(heap()->code_space());
276 } else if (FLAG_trace_fragmentation) {
277 TraceFragmentation(heap()->code_space());
280 if (FLAG_trace_fragmentation) {
281 TraceFragmentation(heap()->map_space());
282 TraceFragmentation(heap()->cell_space());
283 TraceFragmentation(heap()->property_cell_space());
286 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
287 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
288 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
  compacting_ = evacuation_candidates_.length() > 0;

  return compacting_;
}
297 void MarkCompactCollector::CollectGarbage() {
298 // Make sure that Prepare() has been called. The individual steps below will
299 // update the state as they proceed.
300 DCHECK(state_ == PREPARE_GC);
303 DCHECK(heap_->incremental_marking()->IsStopped());
305 // ClearNonLiveReferences can deoptimize code in dependent code arrays.
306 // Process weak cells before so that weak cells in dependent code
307 // arrays are cleared or contain only live code objects.
308 ProcessAndClearWeakCells();
310 if (FLAG_collect_maps) ClearNonLiveReferences();
312 ClearWeakCollections();
314 heap_->set_encountered_weak_cells(Smi::FromInt(0));
317 if (FLAG_verify_heap) {
318 VerifyMarking(heap_);
325 VerifyWeakEmbeddedObjectsInCode();
326 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
327 VerifyOmittedMapChecks();
333 if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
344 PageIterator it(space);
346 while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
349 CHECK_EQ(0, p->LiveBytes());
354 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
355 NewSpacePageIterator it(space->bottom(), space->top());
357 while (it.has_next()) {
358 NewSpacePage* p = it.next();
359 CHECK(p->markbits()->IsClean());
360 CHECK_EQ(0, p->LiveBytes());
365 void MarkCompactCollector::VerifyMarkbitsAreClean() {
366 VerifyMarkbitsAreClean(heap_->old_pointer_space());
367 VerifyMarkbitsAreClean(heap_->old_data_space());
368 VerifyMarkbitsAreClean(heap_->code_space());
369 VerifyMarkbitsAreClean(heap_->cell_space());
370 VerifyMarkbitsAreClean(heap_->property_cell_space());
371 VerifyMarkbitsAreClean(heap_->map_space());
372 VerifyMarkbitsAreClean(heap_->new_space());
374 LargeObjectIterator it(heap_->lo_space());
375 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
376 MarkBit mark_bit = Marking::MarkBitFrom(obj);
377 CHECK(Marking::IsWhite(mark_bit));
378 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
383 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
384 HeapObjectIterator code_iterator(heap()->code_space());
385 for (HeapObject* obj = code_iterator.Next(); obj != NULL;
386 obj = code_iterator.Next()) {
387 Code* code = Code::cast(obj);
388 if (!code->is_optimized_code()) continue;
389 if (WillBeDeoptimized(code)) continue;
390 code->VerifyEmbeddedObjectsDependency();
395 void MarkCompactCollector::VerifyOmittedMapChecks() {
396 HeapObjectIterator iterator(heap()->map_space());
397 for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
398 Map* map = Map::cast(obj);
399 map->VerifyOmittedMapChecks();
402 #endif // VERIFY_HEAP
405 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
406 PageIterator it(space);
408 while (it.has_next()) {
409 Bitmap::Clear(it.next());
414 static void ClearMarkbitsInNewSpace(NewSpace* space) {
415 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
417 while (it.has_next()) {
418 Bitmap::Clear(it.next());
423 void MarkCompactCollector::ClearMarkbits() {
424 ClearMarkbitsInPagedSpace(heap_->code_space());
425 ClearMarkbitsInPagedSpace(heap_->map_space());
426 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
427 ClearMarkbitsInPagedSpace(heap_->old_data_space());
428 ClearMarkbitsInPagedSpace(heap_->cell_space());
429 ClearMarkbitsInPagedSpace(heap_->property_cell_space());
430 ClearMarkbitsInNewSpace(heap_->new_space());
432 LargeObjectIterator it(heap_->lo_space());
433 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
434 MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
437 Page::FromAddress(obj->address())->ResetProgressBar();
438 Page::FromAddress(obj->address())->ResetLiveBytes();
class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() OVERRIDE {
    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
  }

  Heap* heap_;
  PagedSpace* space_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
463 void MarkCompactCollector::StartSweeperThreads() {
464 DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
465 DCHECK(free_list_old_data_space_.get()->IsEmpty());
466 V8::GetCurrentPlatform()->CallOnBackgroundThread(
467 new SweeperTask(heap(), heap()->old_data_space()),
468 v8::Platform::kShortRunningTask);
469 V8::GetCurrentPlatform()->CallOnBackgroundThread(
470 new SweeperTask(heap(), heap()->old_pointer_space()),
471 v8::Platform::kShortRunningTask);
475 void MarkCompactCollector::EnsureSweepingCompleted() {
476 DCHECK(sweeping_in_progress_ == true);
  // If sweeping is not completed or not running at all, we try to complete it
  // here.
480 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
481 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
  }
484 // Wait twice for both jobs.
485 if (FLAG_concurrent_sweeping) {
486 pending_sweeper_jobs_semaphore_.Wait();
    pending_sweeper_jobs_semaphore_.Wait();
  }

489 ParallelSweepSpacesComplete();
490 sweeping_in_progress_ = false;
491 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
492 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
493 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
494 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
497 if (FLAG_verify_heap && !evacuation()) {
498 VerifyEvacuation(heap_);
504 bool MarkCompactCollector::IsSweepingCompleted() {
505 if (!pending_sweeper_jobs_semaphore_.WaitFor(
          base::TimeDelta::FromSeconds(0))) {
    return false;
  }
  pending_sweeper_jobs_semaphore_.Signal();
  return true;
}
void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
  FreeList* free_list;

  if (space == heap()->old_pointer_space()) {
518 free_list = free_list_old_pointer_space_.get();
519 } else if (space == heap()->old_data_space()) {
520 free_list = free_list_old_data_space_.get();
  } else {
    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
    // to only refill them for the old data and old pointer spaces.
    return;
  }

527 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
528 space->AddToAccountingStats(freed_bytes);
529 space->DecrementUnsweptFreeBytes(freed_bytes);
533 void Marking::TransferMark(Address old_start, Address new_start) {
534 // This is only used when resizing an object.
535 DCHECK(MemoryChunk::FromAddress(old_start) ==
536 MemoryChunk::FromAddress(new_start));
538 if (!heap_->incremental_marking()->IsMarking()) return;
540 // If the mark doesn't move, we don't check the color of the object.
541 // It doesn't matter whether the object is black, since it hasn't changed
542 // size, so the adjustment to the live data count will be zero anyway.
543 if (old_start == new_start) return;
545 MarkBit new_mark_bit = MarkBitFrom(new_start);
546 MarkBit old_mark_bit = MarkBitFrom(old_start);
549 ObjectColor old_color = Color(old_mark_bit);
552 if (Marking::IsBlack(old_mark_bit)) {
553 old_mark_bit.Clear();
554 DCHECK(IsWhite(old_mark_bit));
555 Marking::MarkBlack(new_mark_bit);
557 } else if (Marking::IsGrey(old_mark_bit)) {
558 old_mark_bit.Clear();
559 old_mark_bit.Next().Clear();
560 DCHECK(IsWhite(old_mark_bit));
561 heap_->incremental_marking()->WhiteToGreyAndPush(
562 HeapObject::FromAddress(new_start), new_mark_bit);
563 heap_->incremental_marking()->RestartIfNotMarking();
567 ObjectColor new_color = Color(new_mark_bit);
568 DCHECK(new_color == old_color);
const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case PROPERTY_CELL_SPACE: return "PROPERTY_CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default: UNREACHABLE();
  }
  return NULL;
}
599 // Returns zero for pages that have so little fragmentation that it is not
600 // worth defragmenting them. Otherwise a positive integer that gives an
601 // estimate of fragmentation on an arbitrary scale.
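// For example, in code space a page with 10000 bytes of medium free-list
// entries and 5000 bytes of large ones on a (hypothetical) 1 MB area gives
// ratio = (10000 * 10 + 5000 * 2) * 100 / (1 << 20) ~= 10.5, just above the
// code-space threshold of 10, so the page counts as slightly fragmented.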
602 static int FreeListFragmentation(PagedSpace* space, Page* p) {
603 // If page was not swept then there are no free list items on it.
604 if (!p->WasSwept()) {
605 if (FLAG_trace_fragmentation) {
606 PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()), p->LiveBytes());
    }
    return 0;
  }

612 PagedSpace::SizeStats sizes;
613 space->ObtainFreeListStatistics(p, &sizes);
  intptr_t ratio;
  intptr_t ratio_threshold;
617 intptr_t area_size = space->AreaSize();
618 if (space->identity() == CODE_SPACE) {
619 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
    ratio_threshold = 15;
  }
626 if (FLAG_trace_fragmentation) {
627 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
628 reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
629 static_cast<int>(sizes.small_size_),
630 static_cast<double>(sizes.small_size_ * 100) / area_size,
631 static_cast<int>(sizes.medium_size_),
632 static_cast<double>(sizes.medium_size_ * 100) / area_size,
633 static_cast<int>(sizes.large_size_),
634 static_cast<double>(sizes.large_size_ * 100) / area_size,
635 static_cast<int>(sizes.huge_size_),
636 static_cast<double>(sizes.huge_size_ * 100) / area_size,
637 (ratio > ratio_threshold) ? "[fragmented]" : "");
  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}
650 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
651 DCHECK(space->identity() == OLD_POINTER_SPACE ||
652 space->identity() == OLD_DATA_SPACE ||
653 space->identity() == CODE_SPACE);
655 static const int kMaxMaxEvacuationCandidates = 1000;
656 int number_of_pages = space->CountTotalPages();
657 int max_evacuation_candidates =
658 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
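  // For example, a space with 100 pages starts out with
  // sqrt(100 / 2.0) + 1 ~= 8 evacuation candidates, before the adjustments
  // below for stress testing and memory-footprint reduction.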
660 if (FLAG_stress_compaction || FLAG_always_compact) {
661 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) {}
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };
677 enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
679 CompactionMode mode = COMPACT_FREE_LISTS;
681 intptr_t reserved = number_of_pages * space->AreaSize();
682 intptr_t over_reserved = reserved - space->SizeOfObjects();
683 static const intptr_t kFreenessThreshold = 50;
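  // A page whose free percentage reaches kFreenessThreshold becomes an
  // evacuation candidate in REDUCE_MEMORY_FOOTPRINT mode (see the free_pct
  // check below).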
685 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
686 // If reduction of memory footprint was requested, we are aggressive
687 // about choosing pages to free. We expect that half-empty pages
688 // are easier to compact so slightly bump the limit.
689 mode = REDUCE_MEMORY_FOOTPRINT;
690 max_evacuation_candidates += 2;
694 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
695 // If over-usage is very high (more than a third of the space), we
696 // try to free all mostly empty pages. We expect that almost empty
697 // pages are even easier to compact so bump the limit even more.
698 mode = REDUCE_MEMORY_FOOTPRINT;
699 max_evacuation_candidates *= 2;
702 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
704 "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
705 "evacuation candidate limit: %d\n",
706 static_cast<double>(over_reserved) / MB,
707 static_cast<double>(reserved) / MB,
708 static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
711 intptr_t estimated_release = 0;
713 Candidate candidates[kMaxMaxEvacuationCandidates];
715 max_evacuation_candidates =
716 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
  int count = 0;
  int fragmentation = 0;
720 Candidate* least = NULL;
722 PageIterator it(space);
723 while (it.has_next()) {
    Page* p = it.next();
    if (p->NeverEvacuate()) continue;
726 p->ClearEvacuationCandidate();
728 if (FLAG_stress_compaction) {
729 unsigned int counter = space->heap()->ms_count();
730 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
731 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
732 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
733 // Don't try to release too many pages.
        if (estimated_release >= over_reserved) {
          continue;
        }
738 intptr_t free_bytes = 0;
740 if (!p->WasSwept()) {
741 free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();
      }
748 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
750 if (free_pct >= kFreenessThreshold) {
751 estimated_release += free_bytes;
752 fragmentation = free_pct;
757 if (FLAG_trace_fragmentation) {
758 PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
759 AllocationSpaceName(space->identity()),
760 static_cast<int>(free_bytes),
761 static_cast<double>(free_bytes * 100) / p->area_size(),
762 (fragmentation > 0) ? "[fragmented]" : "");
765 fragmentation = FreeListFragmentation(space, p);
768 if (fragmentation != 0) {
769 if (count < max_evacuation_candidates) {
770 candidates[count++] = Candidate(fragmentation, p);
773 for (int i = 0; i < max_evacuation_candidates; i++) {
          if (least == NULL ||
              candidates[i].fragmentation() < least->fragmentation()) {
            least = candidates + i;
          }
780 if (least->fragmentation() < fragmentation) {
781 *least = Candidate(fragmentation, p);
788 for (int i = 0; i < count; i++) {
789 AddEvacuationCandidate(candidates[i].page());
792 if (count > 0 && FLAG_trace_fragmentation) {
793 PrintF("Collected %d evacuation candidates for space %s\n", count,
794 AllocationSpaceName(space->identity()));
799 void MarkCompactCollector::AbortCompaction() {
801 int npages = evacuation_candidates_.length();
802 for (int i = 0; i < npages; i++) {
803 Page* p = evacuation_candidates_[i];
804 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
805 p->ClearEvacuationCandidate();
806 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
809 evacuation_candidates_.Rewind(0);
810 invalidated_code_.Rewind(0);
812 DCHECK_EQ(0, evacuation_candidates_.length());
816 void MarkCompactCollector::Prepare() {
817 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
820 DCHECK(state_ == IDLE);
824 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
826 if (sweeping_in_progress()) {
827 // Instead of waiting we could also abort the sweeper threads here.
828 EnsureSweepingCompleted();
831 // Clear marking bits if incremental marking is aborted.
832 if (was_marked_incrementally_ && abort_incremental_marking_) {
833 heap()->incremental_marking()->Abort();
835 AbortWeakCollections();
838 was_marked_incrementally_ = false;
  // Don't start compaction if we are in the middle of an incremental
842 // marking cycle. We did not collect any slots.
843 if (!FLAG_never_compact && !was_marked_incrementally_) {
844 StartCompaction(NON_INCREMENTAL_COMPACTION);
847 PagedSpaces spaces(heap());
848 for (PagedSpace* space = spaces.next(); space != NULL;
849 space = spaces.next()) {
850 space->PrepareForMarkCompact();
854 if (!was_marked_incrementally_ && FLAG_verify_heap) {
855 VerifyMarkbitsAreClean();
861 void MarkCompactCollector::Finish() {
863 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
866 // The stub cache is not traversed during GC; clear the cache to
867 // force lazy re-initialization of it. This must be done after the
868 // GC, because it relies on the new address of certain old space
869 // objects (empty string, illegal builtin).
870 isolate()->stub_cache()->Clear();
872 if (have_code_to_deoptimize_) {
873 // Some code objects were marked for deoptimization during the GC.
874 Deoptimizer::DeoptimizeMarkedCode(isolate());
875 have_code_to_deoptimize_ = false;
878 heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
882 // -------------------------------------------------------------------------
883 // Phase 1: tracing and marking live objects.
884 // before: all objects are in normal state.
885 // after: a live object's map pointer is marked as '00'.
887 // Marking all live objects in the heap as part of mark-sweep or mark-compact
888 // collection. Before marking, all objects are in their normal state. After
889 // marking, live objects' map pointers are marked indicating that the object
890 // has been found reachable.
892 // The marking algorithm is a (mostly) depth-first (because of possible stack
893 // overflow) traversal of the graph of objects reachable from the roots. It
894 // uses an explicit stack of pointers rather than recursion. The young
895 // generation's inactive ('from') space is used as a marking stack. The
896 // objects in the marking stack are the ones that have been reached and marked
897 // but their children have not yet been visited.
899 // The marking stack can overflow during traversal. In that case, we set an
900 // overflow flag. When the overflow flag is set, we continue marking objects
901 // reachable from the objects on the marking stack, but no longer push them on
902 // the marking stack. Instead, we mark them as both marked and overflowed.
903 // When the stack is in the overflowed state, objects marked as overflowed
904 // have been reached and marked but their children have not been visited yet.
905 // After emptying the marking stack, we clear the overflow flag and traverse
906 // the heap looking for objects marked as overflowed, push them on the stack,
907 // and continue with marking. This process repeats until all reachable
908 // objects have been marked.
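//
// As a minimal sketch of that drain/refill loop (MarkRoots() below uses
// exactly this pattern after visiting the strong roots):
//
//   while (marking_deque_.overflowed()) {
//     RefillMarkingDeque();  // Re-scan the heap for objects marked overflowed.
//     EmptyMarkingDeque();   // Drain the deque; it may overflow again.
//   }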
910 void CodeFlusher::ProcessJSFunctionCandidates() {
911 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
912 Object* undefined = isolate_->heap()->undefined_value();
914 JSFunction* candidate = jsfunction_candidates_head_;
915 JSFunction* next_candidate;
916 while (candidate != NULL) {
917 next_candidate = GetNextCandidate(candidate);
918 ClearNextCandidate(candidate, undefined);
920 SharedFunctionInfo* shared = candidate->shared();
922 Code* code = shared->code();
923 MarkBit code_mark = Marking::MarkBitFrom(code);
924 if (!code_mark.Get()) {
925 if (FLAG_trace_code_flushing && shared->is_compiled()) {
926 PrintF("[code-flushing clears: ");
927 shared->ShortPrint();
928 PrintF(" - age: %d]\n", code->GetAge());
930 shared->set_code(lazy_compile);
931 candidate->set_code(lazy_compile);
933 candidate->set_code(code);
936 // We are in the middle of a GC cycle so the write barrier in the code
937 // setter did not record the slot update and we have to do that manually.
938 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
939 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
                                                                    target);
943 Object** shared_code_slot =
944 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
945 isolate_->heap()->mark_compact_collector()->RecordSlot(
946 shared_code_slot, shared_code_slot, *shared_code_slot);
948 candidate = next_candidate;
951 jsfunction_candidates_head_ = NULL;
955 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
956 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
958 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
959 SharedFunctionInfo* next_candidate;
960 while (candidate != NULL) {
961 next_candidate = GetNextCandidate(candidate);
962 ClearNextCandidate(candidate);
964 Code* code = candidate->code();
965 MarkBit code_mark = Marking::MarkBitFrom(code);
966 if (!code_mark.Get()) {
967 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
968 PrintF("[code-flushing clears: ");
969 candidate->ShortPrint();
970 PrintF(" - age: %d]\n", code->GetAge());
972 candidate->set_code(lazy_compile);
    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
                                                           *code_slot);
980 candidate = next_candidate;
983 shared_function_info_candidates_head_ = NULL;
987 void CodeFlusher::ProcessOptimizedCodeMaps() {
988 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
990 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
991 SharedFunctionInfo* next_holder;
993 while (holder != NULL) {
994 next_holder = GetNextCodeMap(holder);
995 ClearNextCodeMap(holder);
997 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
998 int new_length = SharedFunctionInfo::kEntriesStart;
999 int old_length = code_map->length();
1000 for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1001 i += SharedFunctionInfo::kEntryLength) {
      Code* code =
          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1004 if (!Marking::MarkBitFrom(code).Get()) continue;
1006 // Move every slot in the entry.
1007 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1008 int dst_index = new_length++;
1009 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1010 Object* object = code_map->get(i + j);
1011 code_map->set(dst_index, object);
1012 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1013 DCHECK(object->IsSmi());
        } else {
          DCHECK(
              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
                                                                 *slot);
        }
1023 // Trim the optimized code map if entries have been removed.
1024 if (new_length < old_length) {
1025 holder->TrimOptimizedCodeMap(old_length - new_length);
1028 holder = next_holder;
1031 optimized_code_map_holder_head_ = NULL;
1035 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1036 // Make sure previous flushing decisions are revisited.
1037 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1039 if (FLAG_trace_code_flushing) {
1040 PrintF("[code-flushing abandons function-info: ");
1041 shared_info->ShortPrint();
1045 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1046 SharedFunctionInfo* next_candidate;
1047 if (candidate == shared_info) {
1048 next_candidate = GetNextCandidate(shared_info);
1049 shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
1052 while (candidate != NULL) {
1053 next_candidate = GetNextCandidate(candidate);
1055 if (next_candidate == shared_info) {
1056 next_candidate = GetNextCandidate(shared_info);
1057 SetNextCandidate(candidate, next_candidate);
1058 ClearNextCandidate(shared_info);
1062 candidate = next_candidate;
1068 void CodeFlusher::EvictCandidate(JSFunction* function) {
1069 DCHECK(!function->next_function_link()->IsUndefined());
1070 Object* undefined = isolate_->heap()->undefined_value();
1072 // Make sure previous flushing decisions are revisited.
1073 isolate_->heap()->incremental_marking()->RecordWrites(function);
1074 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1076 if (FLAG_trace_code_flushing) {
1077 PrintF("[code-flushing abandons closure: ");
1078 function->shared()->ShortPrint();
1082 JSFunction* candidate = jsfunction_candidates_head_;
1083 JSFunction* next_candidate;
1084 if (candidate == function) {
1085 next_candidate = GetNextCandidate(function);
1086 jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
1089 while (candidate != NULL) {
1090 next_candidate = GetNextCandidate(candidate);
1092 if (next_candidate == function) {
1093 next_candidate = GetNextCandidate(function);
1094 SetNextCandidate(candidate, next_candidate);
1095 ClearNextCandidate(function, undefined);
1099 candidate = next_candidate;
1105 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1106 DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
             ->get(SharedFunctionInfo::kNextMapIndex)
             ->IsUndefined());
1110 // Make sure previous flushing decisions are revisited.
1111 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1113 if (FLAG_trace_code_flushing) {
1114 PrintF("[code-flushing abandons code-map: ");
1115 code_map_holder->ShortPrint();
1119 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1120 SharedFunctionInfo* next_holder;
1121 if (holder == code_map_holder) {
1122 next_holder = GetNextCodeMap(code_map_holder);
1123 optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
  } else {
1126 while (holder != NULL) {
1127 next_holder = GetNextCodeMap(holder);
1129 if (next_holder == code_map_holder) {
1130 next_holder = GetNextCodeMap(code_map_holder);
1131 SetNextCodeMap(holder, next_holder);
1132 ClearNextCodeMap(code_map_holder);
1136 holder = next_holder;
1142 void CodeFlusher::EvictJSFunctionCandidates() {
1143 JSFunction* candidate = jsfunction_candidates_head_;
1144 JSFunction* next_candidate;
1145 while (candidate != NULL) {
1146 next_candidate = GetNextCandidate(candidate);
1147 EvictCandidate(candidate);
1148 candidate = next_candidate;
1150 DCHECK(jsfunction_candidates_head_ == NULL);
1154 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1155 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1156 SharedFunctionInfo* next_candidate;
1157 while (candidate != NULL) {
1158 next_candidate = GetNextCandidate(candidate);
1159 EvictCandidate(candidate);
1160 candidate = next_candidate;
1162 DCHECK(shared_function_info_candidates_head_ == NULL);
1166 void CodeFlusher::EvictOptimizedCodeMaps() {
1167 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1168 SharedFunctionInfo* next_holder;
1169 while (holder != NULL) {
1170 next_holder = GetNextCodeMap(holder);
1171 EvictOptimizedCodeMap(holder);
1172 holder = next_holder;
1174 DCHECK(optimized_code_map_holder_head_ == NULL);
1178 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1179 Heap* heap = isolate_->heap();
1181 JSFunction** slot = &jsfunction_candidates_head_;
1182 JSFunction* candidate = jsfunction_candidates_head_;
1183 while (candidate != NULL) {
1184 if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
1187 candidate = GetNextCandidate(*slot);
1188 slot = GetNextCandidateSlot(*slot);
1193 MarkCompactCollector::~MarkCompactCollector() {
1194 if (code_flusher_ != NULL) {
1195 delete code_flusher_;
1196 code_flusher_ = NULL;
1201 static inline HeapObject* ShortCircuitConsString(Object** p) {
1202 // Optimization: If the heap object pointed to by p is a non-internalized
1203 // cons string whose right substring is HEAP->empty_string, update
1204 // it in place to its left substring. Return the updated value.
1206 // Here we assume that if we change *p, we replace it with a heap object
1207 // (i.e., the left substring of a cons string is always a heap object).
1209 // The check performed is:
1210 // object->IsConsString() && !object->IsInternalizedString() &&
1211 // (ConsString::cast(object)->second() == HEAP->empty_string())
  //   except the maps for the object and its possible substrings might be
  //   marked.
1214 HeapObject* object = HeapObject::cast(*p);
1215 Map* map = object->map();
1216 InstanceType type = map->instance_type();
1217 if (!IsShortcutCandidate(type)) return object;
1219 Object* second = reinterpret_cast<ConsString*>(object)->second();
1220 Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }
1225 // Since we don't have the object's start, it is impossible to update the
1226 // page dirty marks. Therefore, we only replace the string with its left
1227 // substring when page dirty marks do not change.
1228 Object* first = reinterpret_cast<ConsString*>(object)->first();
1229 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
  *p = first;
  return HeapObject::cast(first);
}
1236 class MarkCompactMarkingVisitor
1237 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
                                   HeapObject* obj);
1242 static void ObjectStatsCountFixedArray(
1243 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1244 FixedArraySubInstanceType dictionary_type);
1246 template <MarkCompactMarkingVisitor::VisitorId id>
1247 class ObjectStatsTracker {
1249 static inline void Visit(Map* map, HeapObject* obj);
1252 static void Initialize();
1254 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1255 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1258 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1259 // Mark all objects pointed to in [start, end).
1260 const int kMinRangeForMarkingRecursion = 64;
1261 if (end - start >= kMinRangeForMarkingRecursion) {
1262 if (VisitUnmarkedObjects(heap, start, end)) return;
1263 // We are close to a stack overflow, so just mark the objects.
1265 MarkCompactCollector* collector = heap->mark_compact_collector();
1266 for (Object** p = start; p < end; p++) {
1267 MarkObjectByPointer(collector, start, p);
1271 // Marks the object black and pushes it on the marking stack.
1272 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1273 MarkBit mark = Marking::MarkBitFrom(object);
1274 heap->mark_compact_collector()->MarkObject(object, mark);
1277 // Marks the object black without pushing it on the marking stack.
1278 // Returns true if object needed marking and false otherwise.
1279 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1280 MarkBit mark_bit = Marking::MarkBitFrom(object);
1281 if (!mark_bit.Get()) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }
1288 // Mark object pointed to by p.
1289 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1290 Object** anchor_slot, Object** p)) {
1291 if (!(*p)->IsHeapObject()) return;
1292 HeapObject* object = ShortCircuitConsString(p);
1293 collector->RecordSlot(anchor_slot, p, object);
1294 MarkBit mark = Marking::MarkBitFrom(object);
1295 collector->MarkObject(object, mark);
1299 // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
1303 DCHECK(collector->heap()->Contains(obj));
1304 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1306 Map* map = obj->map();
1307 Heap* heap = obj->GetHeap();
1308 MarkBit mark = Marking::MarkBitFrom(obj);
1309 heap->mark_compact_collector()->SetMark(obj, mark);
1310 // Mark the map pointer and the body.
1311 MarkBit map_mark = Marking::MarkBitFrom(map);
1312 heap->mark_compact_collector()->MarkObject(map, map_mark);
1313 IterateBody(map, obj);
1316 // Visit all unmarked objects pointed to by [start, end).
1317 // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
                                          Object** end)) {
    // Return false if we are close to the stack limit.
1321 StackLimitCheck check(heap->isolate());
1322 if (check.HasOverflowed()) return false;
1324 MarkCompactCollector* collector = heap->mark_compact_collector();
1325 // Visit the unmarked objects.
1326 for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
1329 collector->RecordSlot(start, p, o);
1330 HeapObject* obj = HeapObject::cast(o);
1331 MarkBit mark = Marking::MarkBitFrom(obj);
1332 if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }
1340 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1342 // Code flushing support.
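  // Number of mark-sweep GCs a regexp's compiled code may go unused before it
  // is flushed (see UpdateRegExpCodeAgeAndFlush below).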
1344 static const int kRegExpCodeThreshold = 5;
  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
1348 // Make sure that the fixed array is in fact initialized on the RegExp.
1349 // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;
1354 // Make sure this is a RegExp that actually contains code.
1355 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1357 Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1358 if (!code->IsSmi() &&
1359 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1360 // Save a copy that can be reinstated if we need the code again.
1361 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
1367 FixedArray* data = FixedArray::cast(re->data());
      Object** slot =
          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1370 heap->mark_compact_collector()->RecordSlot(slot, slot, code);
1372 // Set a number in the 0-255 range to guarantee no smi overflow.
1373 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1374 Smi::FromInt(heap->sweep_generation() & 0xff));
1375 } else if (code->IsSmi()) {
1376 int value = Smi::cast(code)->value();
1377 // The regexp has not been compiled yet or there was a compilation error.
1378 if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }
1383 // Check if we should flush now.
1384 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1385 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1386 Smi::FromInt(JSRegExp::kUninitializedValue));
1387 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1388 Smi::FromInt(JSRegExp::kUninitializedValue));
1394 // Works by setting the current sweep_generation (as a smi) in the
1395 // code object place in the data array of the RegExp and keeps a copy
1396 // around that can be reinstated if we reuse the RegExp before flushing.
1397 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1398 // we flush the code.
1399 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1400 Heap* heap = map->GetHeap();
1401 MarkCompactCollector* collector = heap->mark_compact_collector();
1402 if (!collector->is_code_flushing_enabled()) {
1403 VisitJSRegExp(map, object);
1406 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1407 // Flush code or set age on both one byte and two byte code.
1408 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1409 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1410 // Visit the fields of the RegExp, including the updated FixedArray.
1411 VisitJSRegExp(map, object);
1414 static VisitorDispatchTable<Callback> non_count_table_;
1418 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1419 FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
1420 FixedArraySubInstanceType dictionary_type) {
1421 Heap* heap = fixed_array->map()->GetHeap();
1422 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1423 fixed_array->map() != heap->fixed_double_array_map() &&
1424 fixed_array != heap->empty_fixed_array()) {
1425 if (fixed_array->IsDictionary()) {
1426 heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
1428 heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
1434 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1435 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1436 Heap* heap = map->GetHeap();
1437 int object_size = obj->Size();
1438 heap->RecordObjectStats(map->instance_type(), object_size);
1439 non_count_table_.GetVisitorById(id)(map, obj);
1440 if (obj->IsJSObject()) {
1441 JSObject* object = JSObject::cast(obj);
1442 ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
1443 FAST_ELEMENTS_SUB_TYPE);
1444 ObjectStatsCountFixedArray(object->properties(),
1445 DICTIONARY_PROPERTIES_SUB_TYPE,
1446 FAST_PROPERTIES_SUB_TYPE);
1451 template <MarkCompactMarkingVisitor::VisitorId id>
void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
                                                              HeapObject* obj) {
  ObjectStatsVisitBase(id, map, obj);
}
template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitMap> {
 public:
1462 static inline void Visit(Map* map, HeapObject* obj) {
1463 Heap* heap = map->GetHeap();
1464 Map* map_obj = Map::cast(obj);
1465 DCHECK(map->instance_type() == MAP_TYPE);
1466 DescriptorArray* array = map_obj->instance_descriptors();
1467 if (map_obj->owns_descriptors() &&
1468 array != heap->empty_descriptor_array()) {
1469 int fixed_array_size = array->Size();
      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
                                         fixed_array_size);
    }
1473 if (map_obj->HasTransitionArray()) {
1474 int fixed_array_size = map_obj->transitions()->Size();
      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
                                         fixed_array_size);
    }
1478 if (map_obj->has_code_cache()) {
1479 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1480 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1481 cache->default_cache()->Size());
1482 if (!cache->normal_type_cache()->IsUndefined()) {
1483 heap->RecordFixedArraySubTypeStats(
1484 MAP_CODE_CACHE_SUB_TYPE,
1485 FixedArray::cast(cache->normal_type_cache())->Size());
1488 ObjectStatsVisitBase(kVisitMap, map, obj);
template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitCode> {
 public:
1497 static inline void Visit(Map* map, HeapObject* obj) {
1498 Heap* heap = map->GetHeap();
1499 int object_size = obj->Size();
1500 DCHECK(map->instance_type() == CODE_TYPE);
1501 Code* code_obj = Code::cast(obj);
    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
                                 object_size);
1504 ObjectStatsVisitBase(kVisitCode, map, obj);
template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
 public:
1513 static inline void Visit(Map* map, HeapObject* obj) {
1514 Heap* heap = map->GetHeap();
1515 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1516 if (sfi->scope_info() != heap->empty_fixed_array()) {
1517 heap->RecordFixedArraySubTypeStats(
1518 SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
1520 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
template <>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitFixedArray> {
 public:
1529 static inline void Visit(Map* map, HeapObject* obj) {
1530 Heap* heap = map->GetHeap();
1531 FixedArray* fixed_array = FixedArray::cast(obj);
1532 if (fixed_array == heap->string_table()) {
1533 heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
1534 fixed_array->Size());
1536 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1541 void MarkCompactMarkingVisitor::Initialize() {
1542 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1544 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1546 if (FLAG_track_gc_object_stats) {
1547 // Copy the visitor table to make call-through possible.
1548 non_count_table_.CopyFrom(&table_);
1549 #define VISITOR_ID_COUNT_FUNCTION(id) \
1550 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1551 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1552 #undef VISITOR_ID_COUNT_FUNCTION
1557 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1558 MarkCompactMarkingVisitor::non_count_table_;
1561 class CodeMarkingVisitor : public ThreadVisitor {
1563 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1564 : collector_(collector) {}
1566 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1567 collector_->PrepareThreadForCodeFlushing(isolate, top);
1571 MarkCompactCollector* collector_;
1575 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1577 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1578 : collector_(collector) {}
1580 void VisitPointers(Object** start, Object** end) {
1581 for (Object** p = start; p < end; p++) VisitPointer(p);
1584 void VisitPointer(Object** slot) {
1585 Object* obj = *slot;
1586 if (obj->IsSharedFunctionInfo()) {
1587 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1588 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1589 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1590 collector_->MarkObject(shared->code(), code_mark);
1591 collector_->MarkObject(shared, shared_mark);
1596 MarkCompactCollector* collector_;
1600 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1601 ThreadLocalTop* top) {
1602 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization,
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function, and StackFrame::LookupCode will return the
    // actual optimized code object.
1607 StackFrame* frame = it.frame();
1608 Code* code = frame->unchecked_code();
1609 MarkBit code_mark = Marking::MarkBitFrom(code);
1610 MarkObject(code, code_mark);
1611 if (frame->is_optimized()) {
1612 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1613 frame->LookupCode());
1619 void MarkCompactCollector::PrepareForCodeFlushing() {
1620 // Enable code flushing for non-incremental cycles.
1621 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1622 EnableCodeFlushing(!was_marked_incrementally_);
1625 // If code flushing is disabled, there is no need to prepare for it.
1626 if (!is_code_flushing_enabled()) return;
1628 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1629 // relies on it being marked before any other descriptor array.
1630 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1631 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1632 MarkObject(descriptor_array, descriptor_array_mark);
1634 // Make sure we are not referencing the code from the stack.
1635 DCHECK(this == heap()->mark_compact_collector());
1636 PrepareThreadForCodeFlushing(heap()->isolate(),
1637 heap()->isolate()->thread_local_top());
1639 // Iterate the archived stacks in all threads to check if
1640 // the code is referenced.
1641 CodeMarkingVisitor code_marking_visitor(this);
1642 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1643 &code_marking_visitor);
1645 SharedFunctionInfoMarkingVisitor visitor(this);
1646 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1647 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1649 ProcessMarkingDeque();
1653 // Visitor class for marking heap roots.
1654 class RootMarkingVisitor : public ObjectVisitor {
1656 explicit RootMarkingVisitor(Heap* heap)
1657 : collector_(heap->mark_compact_collector()) {}
1659 void VisitPointer(Object** p) { MarkObjectByPointer(p); }
1661 void VisitPointers(Object** start, Object** end) {
1662 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1665 // Skip the weak next code link in a code object, which is visited in
1666 // ProcessTopOptimizedFrame.
1667 void VisitNextCodeLink(Object** p) {}
1670 void MarkObjectByPointer(Object** p) {
1671 if (!(*p)->IsHeapObject()) return;
1673 // Replace flat cons strings in place.
1674 HeapObject* object = ShortCircuitConsString(p);
1675 MarkBit mark_bit = Marking::MarkBitFrom(object);
1676 if (mark_bit.Get()) return;
1678 Map* map = object->map();
1680 collector_->SetMark(object, mark_bit);
1682 // Mark the map pointer and body, and push them on the marking stack.
1683 MarkBit map_mark = Marking::MarkBitFrom(map);
1684 collector_->MarkObject(map, map_mark);
1685 MarkCompactMarkingVisitor::IterateBody(map, object);
1687 // Mark all the objects reachable from the map and body. May leave
1688 // overflowed objects in the heap.
1689 collector_->EmptyMarkingDeque();
1692 MarkCompactCollector* collector_;
1696 // Helper class for pruning the string table.
1697 template <bool finalize_external_strings>
1698 class StringTableCleaner : public ObjectVisitor {
1700 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1702 virtual void VisitPointers(Object** start, Object** end) {
1703 // Visit all HeapObject pointers in [start, end).
1704 for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject() &&
1707 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1708 if (finalize_external_strings) {
1709 DCHECK(o->IsExternalString());
1710 heap_->FinalizeExternalString(String::cast(*p));
        } else {
          pointers_removed_++;
        }
1714 // Set the entry to the_hole_value (as deleted).
1715 *p = heap_->the_hole_value();
1720 int PointersRemoved() {
1721 DCHECK(!finalize_external_strings);
1722 return pointers_removed_;
 private:
  Heap* heap_;
  int pointers_removed_;
};
1731 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1732 typedef StringTableCleaner<true> ExternalStringTableCleaner;
// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
1737 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1739 virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
      return object;
1742 } else if (object->IsAllocationSite() &&
1743 !(AllocationSite::cast(object)->IsZombie())) {
1744 // "dead" AllocationSites need to live long enough for a traversal of new
1745 // space. These sites get a one-time reprieve.
1746 AllocationSite* site = AllocationSite::cast(object);
      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
      return object;
    } else {
      return NULL;
    }
  }
1757 // Fill the marking stack with overflowed objects returned by the given
1758 // iterator. Stop when the marking stack is filled or the end of the space
1759 // is reached, whichever comes first.
template <class T>
static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                            MarkingDeque* marking_deque,
                                            T* it) {
1764 // The caller should ensure that the marking stack is initially not full,
1765 // so that we don't waste effort pointlessly scanning for objects.
1766 DCHECK(!marking_deque->IsFull());
1768 Map* filler_map = heap->one_pointer_filler_map();
1769 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1770 MarkBit markbit = Marking::MarkBitFrom(object);
1771 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1772 Marking::GreyToBlack(markbit);
1773 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1774 marking_deque->PushBlack(object);
1775 if (marking_deque->IsFull()) return;
1781 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
                                      MemoryChunk* p) {
1786 DCHECK(!marking_deque->IsFull());
1787 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1788 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1789 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1790 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1792 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1793 Address cell_base = it.CurrentCellBase();
1794 MarkBit::CellType* cell = it.CurrentCell();
1796 const MarkBit::CellType current_cell = *cell;
1797 if (current_cell == 0) continue;
1799 MarkBit::CellType grey_objects;
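    // A grey object has both of its mark bits set ("11" above), so ANDing
    // each bit of the cell with its right-hand neighbour leaves exactly one
    // bit set per grey object; at the end of a cell the neighbouring bit has
    // to be pulled in from the next cell.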
1801 const MarkBit::CellType next_cell = *(cell + 1);
1802 grey_objects = current_cell & ((current_cell >> 1) |
1803 (next_cell << (Bitmap::kBitsPerCell - 1)));
1805 grey_objects = current_cell & (current_cell >> 1);
    int offset = 0;
    while (grey_objects != 0) {
1810 int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
1811 grey_objects >>= trailing_zeros;
1812 offset += trailing_zeros;
1813 MarkBit markbit(cell, 1 << offset, false);
1814 DCHECK(Marking::IsGrey(markbit));
1815 Marking::GreyToBlack(markbit);
1816 Address addr = cell_base + offset * kPointerSize;
1817 HeapObject* object = HeapObject::FromAddress(addr);
1818 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1819 marking_deque->PushBlack(object);
1820 if (marking_deque->IsFull()) return;
1825 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1830 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1831 NewSpace* new_space, NewSpacePage* p) {
1832 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1833 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1834 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1835 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1837 MarkBit::CellType* cells = p->markbits()->cells();
1838 int survivors_size = 0;
1840 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1841 Address cell_base = it.CurrentCellBase();
1842 MarkBit::CellType* cell = it.CurrentCell();
1844 MarkBit::CellType current_cell = *cell;
1845 if (current_cell == 0) continue;
    int offset = 0;
    while (current_cell != 0) {
1849 int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
1850 current_cell >>= trailing_zeros;
1851 offset += trailing_zeros;
1852 Address address = cell_base + offset * kPointerSize;
1853 HeapObject* object = HeapObject::FromAddress(address);
1855 int size = object->Size();
1856 survivors_size += size;
1858 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
1863 // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1864 if (heap()->ShouldBePromoted(object->address(), size) &&
1865 TryPromoteObject(object, size)) {
1869 AllocationResult allocation = new_space->AllocateRaw(size);
1870 if (allocation.IsRetry()) {
1871 if (!new_space->AddFreshPage()) {
1872 // Shouldn't happen. We are sweeping linearly, and to-space
1873 // has the same number of pages as from-space, so there is
1877 allocation = new_space->AllocateRaw(size);
1878 DCHECK(!allocation.IsRetry());
1880 Object* target = allocation.ToObjectChecked();
1882 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
1883 heap()->IncrementSemiSpaceCopiedObjectSize(size);
1887 return survivors_size;
1891 static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
1892 PagedSpace* space) {
1893 PageIterator it(space);
1894 while (it.has_next()) {
1895 Page* p = it.next();
1896 DiscoverGreyObjectsOnPage(marking_deque, p);
1897 if (marking_deque->IsFull()) return;
1902 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
1903 MarkingDeque* marking_deque) {
1904 NewSpace* space = heap->new_space();
1905 NewSpacePageIterator it(space->bottom(), space->top());
1906 while (it.has_next()) {
1907 NewSpacePage* page = it.next();
1908 DiscoverGreyObjectsOnPage(marking_deque, page);
1909 if (marking_deque->IsFull()) return;
1914 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1916 if (!o->IsHeapObject()) return false;
1917 HeapObject* heap_object = HeapObject::cast(o);
1918 MarkBit mark = Marking::MarkBitFrom(heap_object);
1923 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1926 DCHECK(o->IsHeapObject());
1927 HeapObject* heap_object = HeapObject::cast(o);
1928 MarkBit mark = Marking::MarkBitFrom(heap_object);
1933 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1934 StringTable* string_table = heap()->string_table();
1935 // Mark the string table itself.
1936 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1937 if (!string_table_mark.Get()) {
1938 // String table could have already been marked by visiting the handles list.
1939 SetMark(string_table, string_table_mark);
1941 // Explicitly mark the prefix.
1942 string_table->IteratePrefix(visitor);
1943 ProcessMarkingDeque();
1947 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1948 MarkBit mark_bit = Marking::MarkBitFrom(site);
1949 SetMark(site, mark_bit);
1953 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1954 // Mark the heap roots including global variables, stack variables,
1955 // etc., and all objects reachable from them.
1956 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1958 // Handle the string table specially.
1959 MarkStringTable(visitor);
1961 // There may be overflowed objects in the heap. Visit them now.
1962 while (marking_deque_.overflowed()) {
1963 RefillMarkingDeque();
1964 EmptyMarkingDeque();
1969 void MarkCompactCollector::MarkImplicitRefGroups() {
1970 List<ImplicitRefGroup*>* ref_groups =
1971 isolate()->global_handles()->implicit_ref_groups();
1974 for (int i = 0; i < ref_groups->length(); i++) {
1975 ImplicitRefGroup* entry = ref_groups->at(i);
1976 DCHECK(entry != NULL);
1978 if (!IsMarked(*entry->parent)) {
1979 (*ref_groups)[last++] = entry;
1983 Object*** children = entry->children;
1984 // A parent object is marked, so mark all child heap objects.
1985 for (size_t j = 0; j < entry->length; ++j) {
1986 if ((*children[j])->IsHeapObject()) {
1987 HeapObject* child = HeapObject::cast(*children[j]);
1988 MarkBit mark = Marking::MarkBitFrom(child);
1989 MarkObject(child, mark);
1993 // Once the entire group has been marked, dispose it because it's
1994 // not needed anymore.
1997 ref_groups->Rewind(last);
2001 // Mark all objects reachable from the objects on the marking stack.
2002 // Before: the marking stack contains zero or more heap object pointers.
2003 // After: the marking stack is empty, and all objects reachable from the
2004 // marking stack have been marked, or are overflowed in the heap.
2005 void MarkCompactCollector::EmptyMarkingDeque() {
2006 Map* filler_map = heap_->one_pointer_filler_map();
2007 while (!marking_deque_.IsEmpty()) {
2008 HeapObject* object = marking_deque_.Pop();
2009 // Explicitly skip one word fillers. Incremental markbit patterns are
2010 // correct only for objects that occupy at least two words.
2011 Map* map = object->map();
2012 if (map == filler_map) continue;
2014 DCHECK(object->IsHeapObject());
2015 DCHECK(heap()->Contains(object));
2016 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
2018 MarkBit map_mark = Marking::MarkBitFrom(map);
2019 MarkObject(map, map_mark);
2021 MarkCompactMarkingVisitor::IterateBody(map, object);
2026 // Sweep the heap for overflowed objects, clear their overflow bits, and
2027 // push them on the marking stack. Stop early if the marking stack fills
2028 // before sweeping completes. If sweeping completes, there are no remaining
2029 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
2031 void MarkCompactCollector::RefillMarkingDeque() {
2032 DCHECK(marking_deque_.overflowed());
2034 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2035 if (marking_deque_.IsFull()) return;
2037 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2038 heap()->old_pointer_space());
2039 if (marking_deque_.IsFull()) return;
2041 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
2042 if (marking_deque_.IsFull()) return;
2044 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
2045 if (marking_deque_.IsFull()) return;
2047 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
2048 if (marking_deque_.IsFull()) return;
2050 DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
2051 if (marking_deque_.IsFull()) return;
2053 DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
2054 heap()->property_cell_space());
2055 if (marking_deque_.IsFull()) return;
2057 LargeObjectIterator lo_it(heap()->lo_space());
2058 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
2059 if (marking_deque_.IsFull()) return;
2061 marking_deque_.ClearOverflowed();
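// Illustrative sketch (not part of the collector; the names below are
// hypothetical): the overflow protocol used by the marking deque. A push that
// does not fit leaves the object grey (or turns it back to grey) and sets the
// overflowed flag; the caller then rescans the spaces for grey objects, as
// RefillMarkingDeque does above, until a full rescan completes without
// filling the deque and the flag can be cleared.
//
//   struct TinyMarkingStack {
//     int entries[4];
//     int top;
//     bool overflowed;
//     void Push(int obj) {
//       if (top == 4) { overflowed = true; return; }  // object stays grey
//       entries[top++] = obj;                          // object is now black
//     }
//   };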
2065 // Mark all objects reachable (transitively) from objects on the marking
2066 // stack. Before: the marking stack contains zero or more heap object
2067 // pointers. After: the marking stack is empty and there are no overflowed
2068 // objects in the heap.
2069 void MarkCompactCollector::ProcessMarkingDeque() {
2070 EmptyMarkingDeque();
2071 while (marking_deque_.overflowed()) {
2072 RefillMarkingDeque();
2073 EmptyMarkingDeque();
2078 // Mark all objects reachable (transitively) from objects on the marking
2079 // stack including references only considered in the atomic marking pause.
2080 void MarkCompactCollector::ProcessEphemeralMarking(
2081 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
2082 bool work_to_do = true;
2083 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
2084 while (work_to_do) {
2085 if (!only_process_harmony_weak_collections) {
2086 isolate()->global_handles()->IterateObjectGroups(
2087 visitor, &IsUnmarkedHeapObjectWithHeap);
2088 MarkImplicitRefGroups();
2090 ProcessWeakCollections();
2091 work_to_do = !marking_deque_.IsEmpty();
2092 ProcessMarkingDeque();
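// Illustrative note (hypothetical example, not code from the collector): the
// loop above iterates to a fixed point because each round of processing weak
// collections and object groups can turn previously-unreachable keys live,
// which in turn can make more values reachable. For example, with two weak
// map entries k1 -> k2 and k2 -> v, marking k1 makes k2 live in one round,
// and only the following round discovers that v must be marked as well; the
// loop stops once a round leaves the marking deque empty.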
2097 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2098 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2099 !it.done(); it.Advance()) {
2100 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2103 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2104 Code* code = it.frame()->LookupCode();
2105 if (!code->CanDeoptAt(it.frame()->pc())) {
2106 code->CodeIterateBody(visitor);
2108 ProcessMarkingDeque();
2115 void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
2116 if (marking_deque_memory_ == NULL) {
2117 marking_deque_memory_ = new base::VirtualMemory(4 * MB);
2119 if (!marking_deque_memory_committed_) {
2120 if (!marking_deque_memory_->Commit(
2121 reinterpret_cast<Address>(marking_deque_memory_->address()),
2122 marking_deque_memory_->size(),
2123 false)) { // Not executable.
2124 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
2126 marking_deque_memory_committed_ = true;
2127 InitializeMarkingDeque();
2132 void MarkCompactCollector::InitializeMarkingDeque() {
2133 if (marking_deque_memory_committed_) {
2134 Address addr = static_cast<Address>(marking_deque_memory_->address());
2135 size_t size = marking_deque_memory_->size();
2136 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2137 marking_deque_.Initialize(addr, addr + size);
2142 void MarkCompactCollector::UncommitMarkingDeque() {
2143 if (marking_deque_memory_committed_) {
2144 bool success = marking_deque_memory_->Uncommit(
2145 reinterpret_cast<Address>(marking_deque_memory_->address()),
2146 marking_deque_memory_->size());
2148 marking_deque_memory_committed_ = false;
2153 void MarkCompactCollector::OverApproximateWeakClosure() {
2154 GCTracer::Scope gc_scope(heap()->tracer(),
2155 GCTracer::Scope::MC_INCREMENTAL_WEAKCLOSURE);
2157 RootMarkingVisitor root_visitor(heap());
2158 isolate()->global_handles()->IterateObjectGroups(
2159 &root_visitor, &IsUnmarkedHeapObjectWithHeap);
2160 MarkImplicitRefGroups();
2162 // Remove object groups after marking phase.
2163 heap()->isolate()->global_handles()->RemoveObjectGroups();
2164 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2168 void MarkCompactCollector::MarkLiveObjects() {
2169 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2170 double start_time = 0.0;
2171 if (FLAG_print_cumulative_gc_stat) {
2172 start_time = base::OS::TimeCurrentMillis();
2174 // The recursive GC marker detects when it is nearing stack overflow,
2175 // and switches to a different marking system. JS interrupts interfere
2176 // with the C stack limit check.
2177 PostponeInterruptsScope postpone(isolate());
2179 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2180 if (was_marked_incrementally_) {
2181 incremental_marking->Finalize();
2183 // Abort any pending incremental activities e.g. incremental sweeping.
2184 incremental_marking->Abort();
2185 InitializeMarkingDeque();
2189 DCHECK(state_ == PREPARE_GC);
2190 state_ = MARK_LIVE_OBJECTS;
2193 EnsureMarkingDequeIsCommittedAndInitialize();
2195 PrepareForCodeFlushing();
2197 if (was_marked_incrementally_) {
2198 // There is no write barrier on cells so we have to scan them now at the end
2199 // of the incremental marking.
2201 HeapObjectIterator cell_iterator(heap()->cell_space());
2203 while ((cell = cell_iterator.Next()) != NULL) {
2204 DCHECK(cell->IsCell());
2205 if (IsMarked(cell)) {
2206 int offset = Cell::kValueOffset;
2207 MarkCompactMarkingVisitor::VisitPointer(
2208 heap(), reinterpret_cast<Object**>(cell->address() + offset));
2213 HeapObjectIterator js_global_property_cell_iterator(
2214 heap()->property_cell_space());
2216 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2217 DCHECK(cell->IsPropertyCell());
2218 if (IsMarked(cell)) {
2219 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2225 RootMarkingVisitor root_visitor(heap());
2226 MarkRoots(&root_visitor);
2228 ProcessTopOptimizedFrame(&root_visitor);
2231 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
2233 // The objects reachable from the roots are marked, yet unreachable
2234 // objects are unmarked. Mark objects reachable due to host
2235 // application specific logic or through Harmony weak maps.
2236 ProcessEphemeralMarking(&root_visitor, false);
2238 // The objects reachable from the roots, weak maps or object groups
2239 // are marked. Objects pointed to only by weak global handles cannot be
2240 // immediately reclaimed. Instead, we have to mark them as pending and mark
2241 // objects reachable from them.
2243 // First we identify non-live weak handles and mark them as pending destruction.
2245 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2246 &IsUnmarkedHeapObject);
2247 // Then we mark the objects.
2248 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2249 ProcessMarkingDeque();
2251 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2252 // the weak roots we just marked as pending destruction.
2254 // We only process harmony collections, as all object groups have been fully
2255 // processed and no weakly reachable node can discover new object groups.
2256 ProcessEphemeralMarking(&root_visitor, true);
2261 if (FLAG_print_cumulative_gc_stat) {
2262 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2267 void MarkCompactCollector::AfterMarking() {
2268 // Prune the string table removing all strings only pointed to by the
2269 // string table. Cannot use string_table() here because the string
2271 StringTable* string_table = heap()->string_table();
2272 InternalizedStringTableCleaner internalized_visitor(heap());
2273 string_table->IterateElements(&internalized_visitor);
2274 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2276 ExternalStringTableCleaner external_visitor(heap());
2277 heap()->external_string_table_.Iterate(&external_visitor);
2278 heap()->external_string_table_.CleanUp();
2280 // Process the weak references.
2281 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2282 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2284 // Remove object groups after marking phase.
2285 heap()->isolate()->global_handles()->RemoveObjectGroups();
2286 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2288 // Flush code from collected candidates.
2289 if (is_code_flushing_enabled()) {
2290 code_flusher_->ProcessCandidates();
2291 // If incremental marker does not support code flushing, we need to
2292 // disable it before incremental marking steps for next cycle.
2293 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2294 EnableCodeFlushing(false);
2298 if (FLAG_track_gc_object_stats) {
2299 heap()->CheckpointObjectStats();
2304 void MarkCompactCollector::ClearNonLiveReferences() {
2305 // Iterate over the map space, setting map transitions that go from
2306 // a marked map to an unmarked map to null transitions. This action
2307 // is carried out only on maps of JSObjects and related subtypes.
2308 HeapObjectIterator map_iterator(heap()->map_space());
2309 for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2310 obj = map_iterator.Next()) {
2311 Map* map = Map::cast(obj);
2313 if (!map->CanTransition()) continue;
2315 MarkBit map_mark = Marking::MarkBitFrom(map);
2316 ClearNonLivePrototypeTransitions(map);
2317 ClearNonLiveMapTransitions(map, map_mark);
2319 if (!map_mark.Get()) {
2320 have_code_to_deoptimize_ |=
2321 map->dependent_code()->MarkCodeForDeoptimization(
2322 isolate(), DependentCode::kWeakCodeGroup);
2323 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2327 WeakHashTable* table = heap_->weak_object_to_code_table();
2328 uint32_t capacity = table->Capacity();
2329 for (uint32_t i = 0; i < capacity; i++) {
2330 uint32_t key_index = table->EntryToIndex(i);
2331 Object* key = table->get(key_index);
2332 if (!table->IsKey(key)) continue;
2333 uint32_t value_index = table->EntryToValueIndex(i);
2334 Object* value = table->get(value_index);
2335 if (WeakCell::cast(key)->cleared()) {
2336 have_code_to_deoptimize_ |=
2337 DependentCode::cast(value)->MarkCodeForDeoptimization(
2338 isolate(), DependentCode::kWeakCodeGroup);
2339 table->set(key_index, heap_->the_hole_value());
2340 table->set(value_index, heap_->the_hole_value());
2341 table->ElementRemoved();
2347 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2348 int number_of_transitions = map->NumberOfProtoTransitions();
2349 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2351 const int header = Map::kProtoTransitionHeaderSize;
2352 int new_number_of_transitions = 0;
2353 for (int i = 0; i < number_of_transitions; i++) {
2354 Object* cached_map = prototype_transitions->get(header + i);
2355 if (IsMarked(cached_map)) {
2356 if (new_number_of_transitions != i) {
2357 prototype_transitions->set(header + new_number_of_transitions,
2358 cached_map, SKIP_WRITE_BARRIER);
2360 new_number_of_transitions++;
2364 if (new_number_of_transitions != number_of_transitions) {
2365 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2368 // Fill slots that became free with undefined value.
2369 for (int i = new_number_of_transitions; i < number_of_transitions; i++) {
2370 prototype_transitions->set_undefined(header + i);
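// Illustrative sketch (hypothetical helper, not used by the collector) of the
// same left-compaction idiom on a plain array: live entries are shifted to
// the front, the live count is updated, and the freed tail is cleared.
//
//   int CompactLive(int* entries, int length) {   // 0 stands for "dead"
//     int live = 0;
//     for (int i = 0; i < length; i++) {
//       if (entries[i] != 0) {
//         if (live != i) entries[live] = entries[i];
//         live++;
//       }
//     }
//     for (int i = live; i < length; i++) entries[i] = 0;  // clear the tail
//     return live;
//   }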
2375 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2377 Object* potential_parent = map->GetBackPointer();
2378 if (!potential_parent->IsMap()) return;
2379 Map* parent = Map::cast(potential_parent);
2381 // Follow the back pointer to check whether we are dealing with a map transition
2382 // from a live map to a dead path; if so, clear the transitions of the parent.
2383 bool current_is_alive = map_mark.Get();
2384 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2385 if (!current_is_alive && parent_is_alive) {
2386 ClearMapTransitions(parent);
2391 // Clear a possible back pointer in case the transition leads to a dead map.
2392 // Return true in case a back pointer has been cleared and false otherwise.
2393 bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
2394 if (Marking::MarkBitFrom(target).Get()) return false;
2395 target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
2400 void MarkCompactCollector::ClearMapTransitions(Map* map) {
2401 // If there are no transitions to be cleared, return.
2402 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2403 // properly cleared.
2404 if (!map->HasTransitionArray()) return;
2406 TransitionArray* t = map->transitions();
2408 int transition_index = 0;
2410 DescriptorArray* descriptors = map->instance_descriptors();
2411 bool descriptors_owner_died = false;
2413 // Compact all live descriptors to the left.
2414 for (int i = 0; i < t->number_of_transitions(); ++i) {
2415 Map* target = t->GetTarget(i);
2416 if (ClearMapBackPointer(target)) {
2417 if (target->instance_descriptors() == descriptors) {
2418 descriptors_owner_died = true;
2421 if (i != transition_index) {
2422 Name* key = t->GetKey(i);
2423 t->SetKey(transition_index, key);
2424 Object** key_slot = t->GetKeySlot(transition_index);
2425 RecordSlot(key_slot, key_slot, key);
2426 // Target slots do not need to be recorded since maps are not compacted.
2427 t->SetTarget(transition_index, t->GetTarget(i));
2433 // If there are no transitions to be cleared, return.
2434 // TODO(verwaest) Should be an assert, otherwise back pointers are not
2435 // properly cleared.
2436 if (transition_index == t->number_of_transitions()) return;
2438 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2440 if (descriptors_owner_died) {
2441 if (number_of_own_descriptors > 0) {
2442 TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
2443 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2444 map->set_owns_descriptors(true);
2446 DCHECK(descriptors == heap_->empty_descriptor_array());
2450 // Note that we never eliminate a transition array, though we might right-trim
2451 // such that number_of_transitions() == 0. If this assumption changes,
2452 // TransitionArray::Insert() will need to deal with the case that a transition
2453 // array disappeared during GC.
2454 int trim = t->number_of_transitions_storage() - transition_index;
2456 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2457 t, t->IsSimpleTransition() ? trim
2458 : trim * TransitionArray::kTransitionSize);
2459 t->SetNumberOfTransitions(transition_index);
2461 DCHECK(map->HasTransitionArray());
2465 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2466 DescriptorArray* descriptors,
2467 int number_of_own_descriptors) {
2468 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2469 int to_trim = number_of_descriptors - number_of_own_descriptors;
2470 if (to_trim == 0) return;
2472 heap_->RightTrimFixedArray<Heap::FROM_GC>(
2473 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2474 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2476 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2477 descriptors->Sort();
2481 void MarkCompactCollector::TrimEnumCache(Map* map,
2482 DescriptorArray* descriptors) {
2483 int live_enum = map->EnumLength();
2484 if (live_enum == kInvalidEnumCacheSentinel) {
2485 live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
2487 if (live_enum == 0) return descriptors->ClearEnumCache();
2489 FixedArray* enum_cache = descriptors->GetEnumCache();
2491 int to_trim = enum_cache->length() - live_enum;
2492 if (to_trim <= 0) return;
2493 heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
2496 if (!descriptors->HasEnumIndicesCache()) return;
2497 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2498 heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
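// Illustrative example (hypothetical numbers): if the map reports
// live_enum == 3 but the enum cache still holds 7 entries, to_trim is 4 and
// both the enum cache and, if present, the enum indices cache are
// right-trimmed by 4 words, returning that memory to the heap without
// reallocating either array.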
2502 void MarkCompactCollector::ProcessWeakCollections() {
2503 GCTracer::Scope gc_scope(heap()->tracer(),
2504 GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2505 Object* weak_collection_obj = heap()->encountered_weak_collections();
2506 while (weak_collection_obj != Smi::FromInt(0)) {
2507 JSWeakCollection* weak_collection =
2508 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2509 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2510 if (weak_collection->table()->IsHashTable()) {
2511 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2512 Object** anchor = reinterpret_cast<Object**>(table->address());
2513 for (int i = 0; i < table->Capacity(); i++) {
2514 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2516 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2517 RecordSlot(anchor, key_slot, *key_slot);
2518 Object** value_slot =
2519 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2520 MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
2525 weak_collection_obj = weak_collection->next();
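// Illustrative note (hypothetical example): ProcessWeakCollections implements
// ephemeron-style semantics. A value slot is only visited when its key is
// already marked, so an entry {key: k, value: v} keeps v alive only if k is
// reachable through some other path; entries whose keys stay unmarked are
// later dropped by ClearWeakCollections below.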
2530 void MarkCompactCollector::ClearWeakCollections() {
2531 GCTracer::Scope gc_scope(heap()->tracer(),
2532 GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2533 Object* weak_collection_obj = heap()->encountered_weak_collections();
2534 while (weak_collection_obj != Smi::FromInt(0)) {
2535 JSWeakCollection* weak_collection =
2536 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2537 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2538 if (weak_collection->table()->IsHashTable()) {
2539 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2540 for (int i = 0; i < table->Capacity(); i++) {
2541 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2542 if (!MarkCompactCollector::IsMarked(key)) {
2543 table->RemoveEntry(i);
2547 weak_collection_obj = weak_collection->next();
2548 weak_collection->set_next(heap()->undefined_value());
2550 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2554 void MarkCompactCollector::AbortWeakCollections() {
2555 GCTracer::Scope gc_scope(heap()->tracer(),
2556 GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
2557 Object* weak_collection_obj = heap()->encountered_weak_collections();
2558 while (weak_collection_obj != Smi::FromInt(0)) {
2559 JSWeakCollection* weak_collection =
2560 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2561 weak_collection_obj = weak_collection->next();
2562 weak_collection->set_next(heap()->undefined_value());
2564 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2568 void MarkCompactCollector::ProcessAndClearWeakCells() {
2569 HeapObject* undefined = heap()->undefined_value();
2570 Object* weak_cell_obj = heap()->encountered_weak_cells();
2571 while (weak_cell_obj != Smi::FromInt(0)) {
2572 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2573 // We do not insert cleared weak cells into the list, so the value
2574 // cannot be a Smi here.
2575 HeapObject* value = HeapObject::cast(weak_cell->value());
2576 if (!MarkCompactCollector::IsMarked(value)) {
2577 // Cells for new-space objects embedded in optimized code are wrapped in
2578 // WeakCell and put into Heap::weak_object_to_code_table.
2579 // Such cells do not have any strong references but we want to keep them
2580 // alive as long as the cell value is alive.
2581 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2582 if (value->IsCell()) {
2583 Object* cell_value = Cell::cast(value)->value();
2584 if (cell_value->IsHeapObject() &&
2585 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2586 // Resurrect the cell.
2587 MarkBit mark = Marking::MarkBitFrom(value);
2588 SetMark(value, mark);
2589 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2590 RecordSlot(slot, slot, *slot);
2591 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2592 RecordSlot(slot, slot, *slot);
2600 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2601 RecordSlot(slot, slot, *slot);
2603 weak_cell_obj = weak_cell->next();
2604 weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
2606 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2610 void MarkCompactCollector::AbortWeakCells() {
2611 Object* undefined = heap()->undefined_value();
2612 Object* weak_cell_obj = heap()->encountered_weak_cells();
2613 while (weak_cell_obj != Smi::FromInt(0)) {
2614 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2615 weak_cell_obj = weak_cell->next();
2616 weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
2618 heap()->set_encountered_weak_cells(Smi::FromInt(0));
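// Illustrative note: the weak cells (and the weak collections above) are
// threaded through an intrusive "encountered" list terminated by Smi zero;
// the next pointer lives inside the object itself, so the collector can build
// and tear down the list without allocating during the GC pause. Aborting
// simply unlinks every node and resets the list head, leaving the cells
// untouched for the next marking cycle.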
2622 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
2623 if (heap_->InNewSpace(value)) {
2624 heap_->store_buffer()->Mark(slot);
2625 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2626 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2627 reinterpret_cast<Object**>(slot),
2628 SlotsBuffer::IGNORE_OVERFLOW);
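// Illustrative summary (not code from the collector): a migrated slot is
// recorded in one of two places, depending on where its target lives.
//
//   //   target in new space           -> store_buffer()->Mark(slot)
//   //   target on an evacuation page  -> migration_slots_buffer_
//   //   anything else                 -> no record needed
//
// The store buffer later rebuilds old-to-new pointers, while the migration
// slots buffer is replayed in EvacuateNewSpaceAndCandidates to update
// pointers into evacuated pages.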
2633 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2636 // The first pass migrates all live objects from one semispace to the other or
2637 // promotes them to old space. The forwarding address is written directly into
2638 // the first word of the object without any encoding. If the object is dead we
2639 // write NULL as the forwarding address.
2641 // The second pass updates pointers to new space in all spaces. It is possible
2642 // to encounter pointers to dead new space objects during traversal of pointers
2643 // to new space. We should clear them to avoid encountering them during next
2644 // pointer iteration. This is an issue if the store buffer overflows and we
2645 // have to scan the entire old space, including dead objects, looking for
2646 // pointers to new space.
2647 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2648 int size, AllocationSpace dest) {
2649 Address dst_addr = dst->address();
2650 Address src_addr = src->address();
2651 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2652 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2653 if (dest == OLD_POINTER_SPACE) {
2654 Address src_slot = src_addr;
2655 Address dst_slot = dst_addr;
2656 DCHECK(IsAligned(size, kPointerSize));
2658 bool may_contain_raw_values = src->MayContainRawValues();
2659 #if V8_DOUBLE_FIELDS_UNBOXING
2660 LayoutDescriptorHelper helper(src->map());
2661 bool has_only_tagged_fields = helper.all_fields_tagged();
2663 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2664 Object* value = Memory::Object_at(src_slot);
2666 Memory::Object_at(dst_slot) = value;
2668 #if V8_DOUBLE_FIELDS_UNBOXING
2669 if (!may_contain_raw_values &&
2670 (has_only_tagged_fields ||
2671 helper.IsTagged(static_cast<int>(src_slot - src_addr))))
2673 if (!may_contain_raw_values)
2676 RecordMigratedSlot(value, dst_slot);
2679 src_slot += kPointerSize;
2680 dst_slot += kPointerSize;
2683 if (compacting_ && dst->IsJSFunction()) {
2684 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2685 Address code_entry = Memory::Address_at(code_entry_slot);
2687 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2688 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2689 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2690 SlotsBuffer::IGNORE_OVERFLOW);
2692 } else if (dst->IsConstantPoolArray()) {
2693 // We special case ConstantPoolArrays since they could contain integer
2694 // value entries which look like tagged pointers.
2695 // TODO(mstarzinger): restructure this code to avoid this special-casing.
2696 ConstantPoolArray* array = ConstantPoolArray::cast(dst);
2697 ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
2698 while (!code_iter.is_finished()) {
2699 Address code_entry_slot =
2700 dst_addr + array->OffsetOfElementAt(code_iter.next_index());
2701 Address code_entry = Memory::Address_at(code_entry_slot);
2703 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2704 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2705 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2706 SlotsBuffer::IGNORE_OVERFLOW);
2709 ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
2710 while (!heap_iter.is_finished()) {
2712 dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
2713 Object* value = Memory::Object_at(heap_slot);
2714 RecordMigratedSlot(value, heap_slot);
2717 } else if (dest == CODE_SPACE) {
2718 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2719 heap()->MoveBlock(dst_addr, src_addr, size);
2720 SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
2721 SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
2722 SlotsBuffer::IGNORE_OVERFLOW);
2723 Code::cast(dst)->Relocate(dst_addr - src_addr);
2725 DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2726 heap()->MoveBlock(dst_addr, src_addr, size);
2728 heap()->OnMoveEvent(dst, src, size);
2729 Memory::Address_at(src_addr) = dst_addr;
2733 // Visitor for updating pointers from live objects in old spaces to new space.
2734 // It does not expect to encounter pointers to dead objects.
2735 class PointersUpdatingVisitor : public ObjectVisitor {
2737 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2739 void VisitPointer(Object** p) { UpdatePointer(p); }
2741 void VisitPointers(Object** start, Object** end) {
2742 for (Object** p = start; p < end; p++) UpdatePointer(p);
2745 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2746 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2747 Object* target = rinfo->target_object();
2748 Object* old_target = target;
2749 VisitPointer(&target);
2750 // Avoid unnecessary changes that might unnecessarily flush the instruction cache.
2752 if (target != old_target) {
2753 rinfo->set_target_object(target);
2757 void VisitCodeTarget(RelocInfo* rinfo) {
2758 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2759 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2760 Object* old_target = target;
2761 VisitPointer(&target);
2762 if (target != old_target) {
2763 rinfo->set_target_address(Code::cast(target)->instruction_start());
2767 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2768 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2769 Object* stub = rinfo->code_age_stub();
2770 DCHECK(stub != NULL);
2771 VisitPointer(&stub);
2772 if (stub != rinfo->code_age_stub()) {
2773 rinfo->set_code_age_stub(Code::cast(stub));
2777 void VisitDebugTarget(RelocInfo* rinfo) {
2778 DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2779 rinfo->IsPatchedReturnSequence()) ||
2780 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2781 rinfo->IsPatchedDebugBreakSlotSequence()));
2782 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2783 VisitPointer(&target);
2784 rinfo->set_call_address(Code::cast(target)->instruction_start());
2787 static inline void UpdateSlot(Heap* heap, Object** slot) {
2788 Object* obj = reinterpret_cast<Object*>(
2789 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
2791 if (!obj->IsHeapObject()) return;
2793 HeapObject* heap_obj = HeapObject::cast(obj);
2795 // TODO(ishell): remove, once crbug/454297 is caught.
2796 #if V8_TARGET_ARCH_64_BIT
2797 const uintptr_t kBoundary = V8_UINT64_C(1) << 48;
2798 STATIC_ASSERT(kBoundary > 0);
2799 if (reinterpret_cast<uintptr_t>(heap_obj->address()) >= kBoundary) {
2800 CheckLayoutDescriptorAndDie(heap, slot);
2803 MapWord map_word = heap_obj->map_word();
2804 if (map_word.IsForwardingAddress()) {
2805 DCHECK(heap->InFromSpace(heap_obj) ||
2806 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2807 HeapObject* target = map_word.ToForwardingAddress();
2808 base::NoBarrier_CompareAndSwap(
2809 reinterpret_cast<base::AtomicWord*>(slot),
2810 reinterpret_cast<base::AtomicWord>(obj),
2811 reinterpret_cast<base::AtomicWord>(target));
2812 DCHECK(!heap->InFromSpace(target) &&
2813 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2818 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2820 static void CheckLayoutDescriptorAndDie(Heap* heap, Object** slot);
2826 #if V8_TARGET_ARCH_64_BIT
2827 // TODO(ishell): remove, once crbug/454297 is caught.
2828 void PointersUpdatingVisitor::CheckLayoutDescriptorAndDie(Heap* heap,
2830 const int kDataBufferSize = 128;
2831 uintptr_t data[kDataBufferSize] = {0};
2833 data[index++] = 0x10aaaaaaaaUL; // begin marker
2835 data[index++] = reinterpret_cast<uintptr_t>(slot);
2836 data[index++] = 0x15aaaaaaaaUL;
2838 Address slot_address = reinterpret_cast<Address>(slot);
2840 uintptr_t space_owner_id = 0xb001;
2841 if (heap->new_space()->ToSpaceContains(slot_address)) {
2843 } else if (heap->new_space()->FromSpaceContains(slot_address)) {
2845 } else if (heap->old_pointer_space()->ContainsSafe(slot_address)) {
2847 } else if (heap->old_data_space()->ContainsSafe(slot_address)) {
2849 } else if (heap->code_space()->ContainsSafe(slot_address)) {
2851 } else if (heap->map_space()->ContainsSafe(slot_address)) {
2853 } else if (heap->cell_space()->ContainsSafe(slot_address)) {
2855 } else if (heap->property_cell_space()->ContainsSafe(slot_address)) {
2858 // Lo space or other.
2861 data[index++] = space_owner_id;
2862 data[index++] = 0x20aaaaaaaaUL;
2864 // Find the map word lying just before the slot address (usually the map word is
2865 // at -3 words from the slot, but just in case we look further up).
2866 Object** map_slot = slot;
2868 const int kMaxDistanceToMap = 64;
2869 for (int i = 0; i < kMaxDistanceToMap; i++, map_slot--) {
2870 Address map_address = reinterpret_cast<Address>(*map_slot);
2871 if (heap->map_space()->ContainsSafe(map_address)) {
2876 data[index++] = found;
2877 data[index++] = 0x30aaaaaaaaUL;
2878 data[index++] = reinterpret_cast<uintptr_t>(map_slot);
2879 data[index++] = 0x35aaaaaaaaUL;
2882 Address obj_address = reinterpret_cast<Address>(map_slot);
2883 Address end_of_page =
2884 reinterpret_cast<Address>(Page::FromAddress(obj_address)) +
2886 Address end_address =
2887 Min(obj_address + kPointerSize * kMaxDistanceToMap, end_of_page);
2888 int size = static_cast<int>(end_address - obj_address);
2889 data[index++] = size / kPointerSize;
2890 data[index++] = 0x40aaaaaaaaUL;
2891 memcpy(&data[index], reinterpret_cast<void*>(map_slot), size);
2892 index += size / kPointerSize;
2893 data[index++] = 0x50aaaaaaaaUL;
2895 HeapObject* object = HeapObject::FromAddress(obj_address);
2896 data[index++] = reinterpret_cast<uintptr_t>(object);
2897 data[index++] = 0x60aaaaaaaaUL;
2899 Map* map = object->map();
2900 data[index++] = reinterpret_cast<uintptr_t>(map);
2901 data[index++] = 0x70aaaaaaaaUL;
2903 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2904 data[index++] = reinterpret_cast<uintptr_t>(layout_descriptor);
2905 data[index++] = 0x80aaaaaaaaUL;
2907 memcpy(&data[index], reinterpret_cast<void*>(map->address()), Map::kSize);
2908 index += Map::kSize / kPointerSize;
2909 data[index++] = 0x90aaaaaaaaUL;
2912 data[index++] = 0xeeeeeeeeeeUL;
2913 DCHECK(index < kDataBufferSize);
2914 base::OS::PrintError("Data: %p\n", static_cast<void*>(data));
2920 static void UpdatePointer(HeapObject** address, HeapObject* object) {
2921 Address new_addr = Memory::Address_at(object->address());
2923 // The new space sweep will overwrite the map word of dead objects
2924 // with NULL. In this case we do not need to transfer this entry to
2925 // the store buffer which we are rebuilding.
2926 // We perform the pointer update with a no barrier compare-and-swap. The
2927 // compare and swap may fail in the case where the pointer update tries to
2928 // update garbage memory which was concurrently accessed by the sweeper.
2929 if (new_addr != NULL) {
2930 base::NoBarrier_CompareAndSwap(
2931 reinterpret_cast<base::AtomicWord*>(address),
2932 reinterpret_cast<base::AtomicWord>(object),
2933 reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
2938 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2940 MapWord map_word = HeapObject::cast(*p)->map_word();
2942 if (map_word.IsForwardingAddress()) {
2943 return String::cast(map_word.ToForwardingAddress());
2946 return String::cast(*p);
2950 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2952 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2954 OldSpace* target_space = heap()->TargetSpace(object);
2956 DCHECK(target_space == heap()->old_pointer_space() ||
2957 target_space == heap()->old_data_space());
2959 AllocationResult allocation = target_space->AllocateRaw(object_size);
2960 if (allocation.To(&target)) {
2961 MigrateObject(target, object, object_size, target_space->identity());
2962 heap()->IncrementPromotedObjectsSize(object_size);
2970 void MarkCompactCollector::EvacuateNewSpace() {
2971 // There are soft limits in the allocation code, designed to trigger a mark-
2972 // sweep collection by failing allocations. But since we are already in
2973 // a mark-sweep allocation, there is no sense in trying to trigger one.
2974 AlwaysAllocateScope scope(isolate());
2976 NewSpace* new_space = heap()->new_space();
2978 // Store allocation range before flipping semispaces.
2979 Address from_bottom = new_space->bottom();
2980 Address from_top = new_space->top();
2982 // Flip the semispaces. After flipping, to space is empty, from space has the live objects.
2985 new_space->ResetAllocationInfo();
2987 int survivors_size = 0;
2989 // First pass: traverse all objects in inactive semispace, remove marks,
2990 // migrate live objects and write forwarding addresses. This stage puts
2991 // new entries in the store buffer and may cause some pages to be marked
2992 // scan-on-scavenge.
2993 NewSpacePageIterator it(from_bottom, from_top);
2994 while (it.has_next()) {
2995 NewSpacePage* p = it.next();
2996 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
2999 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3000 new_space->set_age_mark(new_space->top());
3004 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3005 AlwaysAllocateScope always_allocate(isolate());
3006 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3007 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3012 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3013 Address cell_base = it.CurrentCellBase();
3014 MarkBit::CellType* cell = it.CurrentCell();
3016 if (*cell == 0) continue;
3018 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3019 for (int i = 0; i < live_objects; i++) {
3020 Address object_addr = cell_base + offsets[i] * kPointerSize;
3021 HeapObject* object = HeapObject::FromAddress(object_addr);
3022 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3024 int size = object->Size();
3026 HeapObject* target_object;
3027 AllocationResult allocation = space->AllocateRaw(size);
3028 if (!allocation.To(&target_object)) {
3029 // If allocation failed, use emergency memory and re-try allocation.
3030 CHECK(space->HasEmergencyMemory());
3031 space->UseEmergencyMemory();
3032 allocation = space->AllocateRaw(size);
3034 if (!allocation.To(&target_object)) {
3035 // OS refused to give us memory.
3036 V8::FatalProcessOutOfMemory("Evacuation");
3040 MigrateObject(target_object, object, size, space->identity());
3041 DCHECK(object->map_word().IsForwardingAddress());
3044 // Clear marking bits for current cell.
3047 p->ResetLiveBytes();
3051 void MarkCompactCollector::EvacuatePages() {
3052 int npages = evacuation_candidates_.length();
3053 for (int i = 0; i < npages; i++) {
3054 Page* p = evacuation_candidates_[i];
3055 DCHECK(p->IsEvacuationCandidate() ||
3056 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3057 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3058 MemoryChunk::SWEEPING_DONE);
3059 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3060 // Allocate emergency memory for the case when compaction fails due to out of memory.
3062 if (!space->HasEmergencyMemory()) {
3063 space->CreateEmergencyMemory();
3065 if (p->IsEvacuationCandidate()) {
3066 // During compaction we might have to request a new page. Check that we
3067 // have an emergency page and the space still has room for that.
3068 if (space->HasEmergencyMemory() && space->CanExpand()) {
3069 EvacuateLiveObjectsFromPage(p);
3070 // Unlink the page from the list of pages here. We must not iterate
3071 // over that page later (e.g. when scan on scavenge pages are
3072 // processed). The page itself will be freed later and is still
3073 // reachable from the evacuation candidates list.
3076 // Without room for expansion evacuation is not guaranteed to succeed.
3077 // Pessimistically abandon unevacuated pages.
3078 for (int j = i; j < npages; j++) {
3079 Page* page = evacuation_candidates_[j];
3080 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3081 page->ClearEvacuationCandidate();
3082 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3089 // Release emergency memory.
3090 PagedSpaces spaces(heap());
3091 for (PagedSpace* space = spaces.next(); space != NULL;
3092 space = spaces.next()) {
3093 if (space->HasEmergencyMemory()) {
3094 space->FreeEmergencyMemory();
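// Illustrative note: compaction is best-effort. Each candidate page gets a
// pre-reserved emergency page so that evacuation does not fail mid-object; if
// even that is not possible (no emergency memory or no room to expand), the
// remaining candidates are abandoned, their slots buffers are deallocated,
// and the pages are downgraded to RESCAN_ON_EVACUATION so the sweeper visits
// them instead.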
3101 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3103 virtual Object* RetainAs(Object* object) {
3104 if (object->IsHeapObject()) {
3105 HeapObject* heap_object = HeapObject::cast(object);
3106 MapWord map_word = heap_object->map_word();
3107 if (map_word.IsForwardingAddress()) {
3108 return map_word.ToForwardingAddress();
3116 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
3117 SlotsBuffer::SlotType slot_type, Address addr) {
3118 switch (slot_type) {
3119 case SlotsBuffer::CODE_TARGET_SLOT: {
3120 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3121 rinfo.Visit(isolate, v);
3124 case SlotsBuffer::CODE_ENTRY_SLOT: {
3125 v->VisitCodeEntry(addr);
3128 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3129 HeapObject* obj = HeapObject::FromAddress(addr);
3130 Code::cast(obj)->CodeIterateBody(v);
3133 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3134 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3135 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3138 case SlotsBuffer::JS_RETURN_SLOT: {
3139 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3140 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3143 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3144 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3145 rinfo.Visit(isolate, v);
3155 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3158 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3161 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3164 template <MarkCompactCollector::SweepingParallelism mode>
3165 static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3167 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3168 DCHECK(free_list == NULL);
3169 return space->Free(start, size);
3171 // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
3172 return size - free_list->Free(start, size);
3177 // Sweeps a page. After sweeping the page can be iterated.
3178 // Slots in live objects pointing into evacuation candidates are updated if requested.
3180 // Returns the size of the biggest contiguous freed memory chunk in bytes.
3181 template <SweepingMode sweeping_mode,
3182 MarkCompactCollector::SweepingParallelism parallelism,
3183 SkipListRebuildingMode skip_list_mode,
3184 FreeSpaceTreatmentMode free_space_mode>
3185 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3187 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3188 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3189 space->identity() == CODE_SPACE);
3190 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3191 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3192 sweeping_mode == SWEEP_ONLY);
3194 Address free_start = p->area_start();
3195 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3198 SkipList* skip_list = p->skip_list();
3199 int curr_region = -1;
3200 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3204 intptr_t freed_bytes = 0;
3205 intptr_t max_freed_bytes = 0;
3207 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3208 Address cell_base = it.CurrentCellBase();
3209 MarkBit::CellType* cell = it.CurrentCell();
3210 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3212 for (; live_objects != 0; live_objects--) {
3213 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3214 if (free_end != free_start) {
3215 int size = static_cast<int>(free_end - free_start);
3216 if (free_space_mode == ZAP_FREE_SPACE) {
3217 memset(free_start, 0xcc, size);
3219 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3220 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3221 #ifdef ENABLE_GDB_JIT_INTERFACE
3222 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3223 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3227 HeapObject* live_object = HeapObject::FromAddress(free_end);
3228 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3229 Map* map = live_object->synchronized_map();
3230 int size = live_object->SizeFromMap(map);
3231 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3232 live_object->IterateBody(map->instance_type(), size, v);
3234 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3235 int new_region_start = SkipList::RegionNumber(free_end);
3236 int new_region_end =
3237 SkipList::RegionNumber(free_end + size - kPointerSize);
3238 if (new_region_start != curr_region || new_region_end != curr_region) {
3239 skip_list->AddObject(free_end, size);
3240 curr_region = new_region_end;
3243 free_start = free_end + size;
3245 // Clear marking bits for current cell.
3248 if (free_start != p->area_end()) {
3249 int size = static_cast<int>(p->area_end() - free_start);
3250 if (free_space_mode == ZAP_FREE_SPACE) {
3251 memset(free_start, 0xcc, size);
3253 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3254 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3255 #ifdef ENABLE_GDB_JIT_INTERFACE
3256 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3257 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3261 p->ResetLiveBytes();
3263 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3264 // When concurrent sweeping is active, the page will be marked after
3265 // sweeping by the main thread.
3266 p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
3270 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
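// Illustrative sketch (hypothetical helper, not used by the sweeper): the
// max_freed_bytes bookkeeping above amounts to finding the largest gap
// between consecutive live objects on the page, plus the tail gap.
//
//   int LargestGap(const int* starts, const int* sizes, int n,
//                  int area_start, int area_end) {
//     int free_start = area_start, max_gap = 0;
//     for (int i = 0; i < n; i++) {
//       int gap = starts[i] - free_start;
//       if (gap > max_gap) max_gap = gap;
//       free_start = starts[i] + sizes[i];
//     }
//     int tail = area_end - free_start;
//     return tail > max_gap ? tail : max_gap;
//   }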
3274 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3275 Page* p = Page::FromAddress(code->address());
3277 if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3281 Address code_start = code->address();
3282 Address code_end = code_start + code->Size();
3284 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3285 uint32_t end_index =
3286 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3288 Bitmap* b = p->markbits();
3290 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3291 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3293 MarkBit::CellType* start_cell = start_mark_bit.cell();
3294 MarkBit::CellType* end_cell = end_mark_bit.cell();
3297 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3298 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3300 if (start_cell == end_cell) {
3301 *start_cell |= start_mask & end_mask;
3303 *start_cell |= start_mask;
3304 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3307 *end_cell |= end_mask;
3310 for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
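// Illustrative example (hypothetical values): for single-bit masks,
// ~(mask - 1) selects the mask bit and every bit above it, while
// ((mask << 1) - 1) selects the mask bit and every bit below it, so their
// intersection covers exactly the code object's mark bits within one cell.
//
//   uint32_t start_bit  = 1u << 5;             // first mark bit of the code
//   uint32_t end_bit    = 1u << 12;            // last mark bit of the code
//   uint32_t start_mask = ~(start_bit - 1);    // 0xFFFFFFE0
//   uint32_t end_mask   = (end_bit << 1) - 1;  // 0x00001FFF
//   // start_mask & end_mask == 0x00001FE0, i.e. bits 5..12 inclusive.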
3319 static bool IsOnInvalidatedCodeObject(Address addr) {
3320 // We did not record any slots in large objects thus
3321 // we can safely go to the page from the slot address.
3322 Page* p = Page::FromAddress(addr);
3324 // First check owner's identity because old pointer and old data spaces
3325 // are swept lazily and might still have non-zero mark-bits on some pages.
3327 if (p->owner()->identity() != CODE_SPACE) return false;
3329 // In code space only bits on evacuation candidates (but we don't record
3330 // any slots on them) and under invalidated code objects are non-zero.
3332 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3334 return mark_bit.Get();
3338 void MarkCompactCollector::InvalidateCode(Code* code) {
3339 if (heap_->incremental_marking()->IsCompacting() &&
3340 !ShouldSkipEvacuationSlotRecording(code)) {
3341 DCHECK(compacting_);
3343 // If the object is white then no slots were recorded on it yet.
3344 MarkBit mark_bit = Marking::MarkBitFrom(code);
3345 if (Marking::IsWhite(mark_bit)) return;
3347 invalidated_code_.Add(code);
3352 // Return true if the given code is deoptimized or will be deoptimized.
3353 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3354 return code->is_optimized_code() && code->marked_for_deoptimization();
3358 bool MarkCompactCollector::MarkInvalidatedCode() {
3359 bool code_marked = false;
3361 int length = invalidated_code_.length();
3362 for (int i = 0; i < length; i++) {
3363 Code* code = invalidated_code_[i];
3365 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3374 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3375 int length = invalidated_code_.length();
3376 for (int i = 0; i < length; i++) {
3377 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3382 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3383 int length = invalidated_code_.length();
3384 for (int i = 0; i < length; i++) {
3385 Code* code = invalidated_code_[i];
3387 code->Iterate(visitor);
3388 SetMarkBitsUnderInvalidatedCode(code, false);
3391 invalidated_code_.Rewind(0);
3395 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3396 Heap::RelocationLock relocation_lock(heap());
3398 bool code_slots_filtering_required;
3400 GCTracer::Scope gc_scope(heap()->tracer(),
3401 GCTracer::Scope::MC_SWEEP_NEWSPACE);
3402 code_slots_filtering_required = MarkInvalidatedCode();
3407 GCTracer::Scope gc_scope(heap()->tracer(),
3408 GCTracer::Scope::MC_EVACUATE_PAGES);
3409 EvacuationScope evacuation_scope(this);
3413 // Second pass: find pointers to new space and update them.
3414 PointersUpdatingVisitor updating_visitor(heap());
3417 GCTracer::Scope gc_scope(heap()->tracer(),
3418 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3419 // Update pointers in to space.
3420 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3421 heap()->new_space()->top());
3422 for (HeapObject* object = to_it.Next(); object != NULL;
3423 object = to_it.Next()) {
3424 Map* map = object->map();
3425 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3431 GCTracer::Scope gc_scope(heap()->tracer(),
3432 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3434 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3438 GCTracer::Scope gc_scope(heap()->tracer(),
3439 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3440 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3441 &Heap::ScavengeStoreBufferCallback);
3442 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3447 GCTracer::Scope gc_scope(heap()->tracer(),
3448 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3449 SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
3450 code_slots_filtering_required);
3451 if (FLAG_trace_fragmentation) {
3452 PrintF(" migration slots buffer: %d\n",
3453 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3456 if (compacting_ && was_marked_incrementally_) {
3457 // It's difficult to filter out slots recorded for large objects.
3458 LargeObjectIterator it(heap_->lo_space());
3459 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3460 // LargeObjectSpace is not swept yet, thus we have to skip
3461 // dead objects explicitly.
3462 if (!IsMarked(obj)) continue;
3464 Page* p = Page::FromAddress(obj->address());
3465 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3466 obj->Iterate(&updating_visitor);
3467 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3473 int npages = evacuation_candidates_.length();
3475 GCTracer::Scope gc_scope(
3477 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3478 for (int i = 0; i < npages; i++) {
3479 Page* p = evacuation_candidates_[i];
3480 DCHECK(p->IsEvacuationCandidate() ||
3481 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3483 if (p->IsEvacuationCandidate()) {
3484 SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
3485 code_slots_filtering_required);
3486 if (FLAG_trace_fragmentation) {
3487 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3488 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3491 // Important: the skip list should be cleared only after the roots were updated,
3492 // because root iteration traverses the stack and might have to find
3493 // code objects from a not-yet-updated pc pointing into an evacuation candidate.
3494 SkipList* list = p->skip_list();
3495 if (list != NULL) list->Clear();
3497 if (FLAG_gc_verbose) {
3498 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3499 reinterpret_cast<intptr_t>(p));
3501 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3502 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3504 switch (space->identity()) {
3505 case OLD_DATA_SPACE:
3506 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3507 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3510 case OLD_POINTER_SPACE:
3511 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3512 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3516 if (FLAG_zap_code_space) {
3517 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3518 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
3521 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3522 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3534 GCTracer::Scope gc_scope(heap()->tracer(),
3535 GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3537 // Update pointers from cells.
3538 HeapObjectIterator cell_iterator(heap_->cell_space());
3539 for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
3540 cell = cell_iterator.Next()) {
3541 if (cell->IsCell()) {
3542 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3546 HeapObjectIterator js_global_property_cell_iterator(
3547 heap_->property_cell_space());
3548 for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
3549 cell = js_global_property_cell_iterator.Next()) {
3550 if (cell->IsPropertyCell()) {
3551 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3555 heap_->string_table()->Iterate(&updating_visitor);
3557 // Update pointers from external string table.
3558 heap_->UpdateReferencesInExternalStringTable(
3559 &UpdateReferenceInExternalStringTableEntry);
3561 EvacuationWeakObjectRetainer evacuation_object_retainer;
3562 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3564 // Collects callback info for handles that are pending (about to be
3565 // collected) and either phantom or internal-fields. Releases the global
3566 // handles. See also PostGarbageCollectionProcessing.
3567 isolate()->global_handles()->CollectAllPhantomCallbackData();
3569 // Visit invalidated code (we ignored all slots on it) and clear mark-bits under it.
3571 ProcessInvalidatedCode(&updating_visitor);
3573 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3575 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3576 DCHECK(migration_slots_buffer_ == NULL);
3578 // The hashing of weak_object_to_code_table is no longer valid.
3579 heap()->weak_object_to_code_table()->Rehash(
3580 heap()->isolate()->factory()->undefined_value());
3584 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3585 int npages = evacuation_candidates_.length();
3586 for (int i = 0; i < npages; i++) {
3587 Page* p = evacuation_candidates_[i];
3588 if (!p->IsEvacuationCandidate()) continue;
3590 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3591 p->InsertAfter(space->LastPage());
3596 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3597 int npages = evacuation_candidates_.length();
3598 for (int i = 0; i < npages; i++) {
3599 Page* p = evacuation_candidates_[i];
3600 if (!p->IsEvacuationCandidate()) continue;
3601 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3602 space->Free(p->area_start(), p->area_size());
3603 p->set_scan_on_scavenge(false);
3604 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3605 p->ResetLiveBytes();
3606 space->ReleasePage(p);
3608 evacuation_candidates_.Rewind(0);
3609 compacting_ = false;
3610 heap()->FreeQueuedChunks();
3614 static const int kStartTableEntriesPerLine = 5;
3615 static const int kStartTableLines = 171;
3616 static const int kStartTableInvalidLine = 127;
3617 static const int kStartTableUnusedEntry = 126;
3619 #define _ kStartTableUnusedEntry
3620 #define X kStartTableInvalidLine
3621 // Mark-bit to object start offset table.
3623 // The table is indexed by the mark bits of a byte. The first number on
3624 // a line gives the number of live object starts encoded by that byte, and the
3625 // other numbers on the line give the offsets (in words) of those object starts.
3628 // Since objects are at least 2 words large we don't have entries for two
3629 // consecutive 1 bits. All byte values above 170 have at least 2 consecutive 1
// bits (170 == 0b10101010 is the largest byte value without two adjacent set
// bits), so the table needs only kStartTableLines == 171 lines.
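//
// Worked example (added here for clarity, not part of the original comment):
// for the mark-bit byte 0x21 == 0b00100001, live objects start at word
// offsets 0 and 5, so the corresponding table line reads { 2, 0, 5, _, _ }
// and MarkWordToObjectStarts() below reports two object starts, at offsets
// 0 and 5, for that byte.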
3630 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3978 // Takes a word of mark bits. Returns the number of objects that start in the
3979 // range. Puts the word offsets of the object starts into the supplied array.
3980 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
int objects = 0;
int offset = 0;
3984 // No consecutive 1 bits.
3985 DCHECK((mark_bits & 0x180) != 0x180);
3986 DCHECK((mark_bits & 0x18000) != 0x18000);
3987 DCHECK((mark_bits & 0x1800000) != 0x1800000);
3989 while (mark_bits != 0) {
3990 int byte = (mark_bits & 0xff);
mark_bits >>= 8;
3993 DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
3994 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3995 int objects_in_these_8_words = table[0];
3996 DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
3997 DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
3998 for (int i = 0; i < objects_in_these_8_words; i++) {
3999 starts[objects++] = offset + table[1 + i];
}
offset += 8;
}
return objects;
}
4008 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4009 int required_freed_bytes) {
int max_freed = 0;
4011 int max_freed_overall = 0;
4012 PageIterator it(space);
4013 while (it.has_next()) {
4014 Page* p = it.next();
4015 max_freed = SweepInParallel(p, space);
4016 DCHECK(max_freed >= 0);
4017 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
return max_freed;
}
4020 max_freed_overall = Max(max_freed, max_freed_overall);
4021 if (p == space->end_of_unswept_pages()) break;
4023 return max_freed_overall;
}
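// A hypothetical caller sketch (illustrative only; the variables heap and
// size_in_bytes are assumed, and real call sites live outside this function):
// an allocation that failed in old space can ask the sweeper to free at least
// the requested number of bytes before retrying.
//
//   int freed = heap->mark_compact_collector()->SweepInParallel(
//       heap->old_pointer_space(), size_in_bytes);
//   if (freed >= size_in_bytes) {
//     // Enough memory was swept free to retry the allocation.
//   }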
4027 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
4029 if (page->TryParallelSweeping()) {
4030 FreeList* free_list = space == heap()->old_pointer_space()
4031 ? free_list_old_pointer_space_.get()
4032 : free_list_old_data_space_.get();
4033 FreeList private_free_list(space);
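// Each sweeper first accumulates freed memory in this private free list and
// merges it into the space's shared free list with a single Concatenate()
// call below, instead of touching the shared list for every freed block.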
4034 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
4035 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
4036 free_list->Concatenate(&private_free_list);
}
return max_freed;
}
4042 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4043 space->ClearStats();
4045 // We defensively initialize end_of_unswept_pages_ here with the first page
4046 // of the pages list.
4047 space->set_end_of_unswept_pages(space->FirstPage());
4049 PageIterator it(space);
4051 int pages_swept = 0;
4052 bool unused_page_present = false;
4053 bool parallel_sweeping_active = false;
4055 while (it.has_next()) {
4056 Page* p = it.next();
4057 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4059 // Clear sweeping flags indicating that marking bits are still intact.
4062 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4063 p->IsEvacuationCandidate()) {
4064 // Will be processed in EvacuateNewSpaceAndCandidates.
4065 DCHECK(evacuation_candidates_.length() > 0);
continue;
}
4069 // One unused page is kept; all further unused pages are released before sweeping them.
4070 if (p->LiveBytes() == 0) {
4071 if (unused_page_present) {
4072 if (FLAG_gc_verbose) {
4073 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4074 reinterpret_cast<intptr_t>(p));
4076 // Adjust unswept free bytes because releasing a page expects said
4077 // counter to be accurate for unswept pages.
4078 space->IncreaseUnsweptFreeBytes(p);
4079 space->ReleasePage(p);
continue;
}
4082 unused_page_present = true;
}
switch (sweeper) {
4086 case CONCURRENT_SWEEPING:
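// The first unswept page encountered is swept right here on the main thread
// so that some memory becomes available immediately; every following page is
// only marked SWEEPING_PENDING and left to the concurrent sweeper threads.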
4087 if (!parallel_sweeping_active) {
4088 if (FLAG_gc_verbose) {
4089 PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
4090 reinterpret_cast<intptr_t>(p));
4092 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4093 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
pages_swept++;
4095 parallel_sweeping_active = true;
} else {
4097 if (FLAG_gc_verbose) {
4098 PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
4099 reinterpret_cast<intptr_t>(p));
4101 p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
4102 space->IncreaseUnsweptFreeBytes(p);
}
4104 space->set_end_of_unswept_pages(p);
break;
4106 case SEQUENTIAL_SWEEPING: {
4107 if (FLAG_gc_verbose) {
4108 PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
4110 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4111 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4112 ZAP_FREE_SPACE>(space, NULL, p, NULL);
4113 } else if (space->identity() == CODE_SPACE) {
4114 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
4115 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
} else {
4117 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
4118 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
pages_swept++;
break;
}
4123 default: { UNREACHABLE(); }
4127 if (FLAG_gc_verbose) {
4128 PrintF("SweepSpace: %s (%d pages swept)\n",
4129 AllocationSpaceName(space->identity()), pages_swept);
4132 // Give pages that are queued to be freed back to the OS.
4133 heap()->FreeQueuedChunks();
4137 void MarkCompactCollector::SweepSpaces() {
4138 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4139 double start_time = 0.0;
4140 if (FLAG_print_cumulative_gc_stat) {
4141 start_time = base::OS::TimeCurrentMillis();
4145 state_ = SWEEP_SPACES;
4147 MoveEvacuationCandidatesToEndOfPagesList();
4149 // Noncompacting collections simply sweep the spaces to clear the mark
4150 // bits and free the nonlive blocks (for old and map spaces). We sweep
4151 // the map space last because freeing non-live maps overwrites them and
4152 // the other spaces rely on possibly non-live maps to get the sizes for
4153 // non-live objects.
4155 GCTracer::Scope sweep_scope(heap()->tracer(),
4156 GCTracer::Scope::MC_SWEEP_OLDSPACE);
4158 SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
4159 SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
4161 sweeping_in_progress_ = true;
4162 if (FLAG_concurrent_sweeping) {
4163 StartSweeperThreads();
4166 RemoveDeadInvalidatedCode();
4169 GCTracer::Scope sweep_scope(heap()->tracer(),
4170 GCTracer::Scope::MC_SWEEP_CODE);
4171 SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
4175 GCTracer::Scope sweep_scope(heap()->tracer(),
4176 GCTracer::Scope::MC_SWEEP_CELL);
4177 SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
4178 SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
4181 EvacuateNewSpaceAndCandidates();
4183 // ClearNonLiveTransitions depends on precise sweeping of the map space to
4184 // detect whether an unmarked map became dead in this collection or in one
4185 // of the previous ones.
4187 GCTracer::Scope sweep_scope(heap()->tracer(),
4188 GCTracer::Scope::MC_SWEEP_MAP);
4189 SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
4192 // Deallocate unmarked objects and clear marked bits for marked objects.
4193 heap_->lo_space()->FreeUnmarkedObjects();
4195 // Deallocate evacuated candidate pages.
4196 ReleaseEvacuationCandidates();
4197 CodeRange* code_range = heap()->isolate()->code_range();
4198 if (code_range != NULL && code_range->valid()) {
4199 code_range->ReserveEmergencyBlock();
4202 if (FLAG_print_cumulative_gc_stat) {
4203 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
start_time);
}
}
4209 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4210 PageIterator it(space);
4211 while (it.has_next()) {
4212 Page* p = it.next();
4213 if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
4214 p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
4217 DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
4222 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4223 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4224 ParallelSweepSpaceComplete(heap()->old_data_space());
4228 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4229 if (isolate()->debug()->is_loaded() ||
4230 isolate()->debug()->has_break_points()) {
enable = false;
}
if (enable) {
4235 if (code_flusher_ != NULL) return;
4236 code_flusher_ = new CodeFlusher(isolate());
} else {
4238 if (code_flusher_ == NULL) return;
4239 code_flusher_->EvictAllCandidates();
4240 delete code_flusher_;
4241 code_flusher_ = NULL;
4244 if (FLAG_trace_code_flushing) {
4245 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4250 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4251 // Our profiling tools do not expect intersections between
4252 // code objects. We should either reenable it or change our tools.
4253 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4255 if (obj->IsCode()) {
4256 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4261 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4264 void MarkCompactCollector::Initialize() {
4265 MarkCompactMarkingVisitor::Initialize();
4266 IncrementalMarking::Initialize();
4270 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4271 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
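// Note on the encoding of typed slots (explanatory comment added here; see
// AddTo() and UpdateSlots() below): a buffer entry whose numeric value is
// smaller than NUMBER_OF_SLOT_TYPES is a type tag rather than a real slot
// address, and the entry that follows it holds the address the tag applies
// to. Recording a code-target slot therefore stores two consecutive entries,
// roughly:
//
//   buffer->Add(reinterpret_cast<ObjectSlot>(SlotsBuffer::CODE_TARGET_SLOT));
//   buffer->Add(reinterpret_cast<ObjectSlot>(pc));
//
// UpdateSlots() consumes such a pair as a (type, address) combination.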
4275 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4276 SlotsBuffer** buffer_address, SlotType type,
4277 Address addr, AdditionMode mode) {
4278 SlotsBuffer* buffer = *buffer_address;
4279 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4280 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4281 allocator->DeallocateChain(buffer_address);
return false;
}
4284 buffer = allocator->AllocateBuffer(buffer);
4285 *buffer_address = buffer;
4287 DCHECK(buffer->HasSpaceForTypedSlot());
4288 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4289 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
return true;
}
4294 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4295 if (RelocInfo::IsCodeTarget(rmode)) {
4296 return SlotsBuffer::CODE_TARGET_SLOT;
4297 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4298 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4299 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4300 return SlotsBuffer::DEBUG_TARGET_SLOT;
4301 } else if (RelocInfo::IsJSReturn(rmode)) {
4302 return SlotsBuffer::JS_RETURN_SLOT;
}
UNREACHABLE();
4305 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4309 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4310 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4311 RelocInfo::Mode rmode = rinfo->rmode();
4312 if (target_page->IsEvacuationCandidate() &&
4313 (rinfo->host() == NULL ||
4314 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
bool success;
4316 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4317 // This doesn't need to be typed since it is just a normal heap pointer.
4318 Object** target_pointer =
4319 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4320 success = SlotsBuffer::AddTo(
4321 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4322 target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
4323 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4324 success = SlotsBuffer::AddTo(
4325 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4326 SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
4327 SlotsBuffer::FAIL_ON_OVERFLOW);
4329 success = SlotsBuffer::AddTo(
4330 &slots_buffer_allocator_, target_page->slots_buffer_address(),
4331 SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
}
if (!success) {
4334 EvictEvacuationCandidate(target_page);
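// AddTo() can fail only in FAIL_ON_OVERFLOW mode, i.e. when the page's chain
// of slots buffers has grown past ChainLengthThresholdReached(); in that case
// the page is evicted from the evacuation candidate set so that no further
// slots have to be recorded for it.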
4340 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4341 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4342 if (target_page->IsEvacuationCandidate() &&
4343 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4344 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4345 target_page->slots_buffer_address(),
4346 SlotsBuffer::CODE_ENTRY_SLOT, slot,
4347 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4348 EvictEvacuationCandidate(target_page);
4354 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4355 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4356 if (is_compacting()) {
Code* host =
4358 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
pc);
4360 MarkBit mark_bit = Marking::MarkBitFrom(host);
4361 if (Marking::IsBlack(mark_bit)) {
4362 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4363 RecordRelocSlot(&rinfo, target);
4369 static inline SlotsBuffer::SlotType DecodeSlotType(
4370 SlotsBuffer::ObjectSlot slot) {
4371 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4375 void SlotsBuffer::UpdateSlots(Heap* heap) {
4376 PointersUpdatingVisitor v(heap);
4378 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4379 ObjectSlot slot = slots_[slot_idx];
4380 if (!IsTypedSlot(slot)) {
4381 PointersUpdatingVisitor::UpdateSlot(heap, slot);
} else {
++slot_idx;
4384 DCHECK(slot_idx < idx_);
4385 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4386 reinterpret_cast<Address>(slots_[slot_idx]));
}
4392 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4393 PointersUpdatingVisitor v(heap);
4395 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4396 ObjectSlot slot = slots_[slot_idx];
4397 if (!IsTypedSlot(slot)) {
4398 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4399 PointersUpdatingVisitor::UpdateSlot(heap, slot);
}
} else {
++slot_idx;
4403 DCHECK(slot_idx < idx_);
4404 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4405 if (!IsOnInvalidatedCodeObject(pc)) {
4406 UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
4407 reinterpret_cast<Address>(slots_[slot_idx]));
4414 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4415 return new SlotsBuffer(next_buffer);
4419 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4424 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4425 SlotsBuffer* buffer = *buffer_address;
4426 while (buffer != NULL) {
4427 SlotsBuffer* next_buffer = buffer->next();
4428 DeallocateBuffer(buffer);
4429 buffer = next_buffer;
4431 *buffer_address = NULL;
4434 } // namespace v8::internal