1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
7 #include "code-stubs.h"
8 #include "compilation-cache.h"
9 #include "cpu-profiler.h"
10 #include "deoptimizer.h"
11 #include "execution.h"
13 #include "global-handles.h"
14 #include "heap-profiler.h"
16 #include "incremental-marking.h"
17 #include "mark-compact.h"
18 #include "objects-visiting.h"
19 #include "objects-visiting-inl.h"
20 #include "spaces-inl.h"
21 #include "stub-cache.h"
22 #include "sweeper-thread.h"
28 const char* Marking::kWhiteBitPattern = "00";
29 const char* Marking::kBlackBitPattern = "10";
30 const char* Marking::kGreyBitPattern = "11";
31 const char* Marking::kImpossibleBitPattern = "01";
34 // -------------------------------------------------------------------------
35 // MarkCompactCollector
37 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
41 sweep_precisely_(false),
42 reduce_memory_footprint_(false),
43 abort_incremental_marking_(false),
44 marking_parity_(ODD_MARKING_PARITY),
46 was_marked_incrementally_(false),
47 sweeping_pending_(false),
48 pending_sweeper_jobs_semaphore_(0),
49 sequential_sweeping_(false),
51 migration_slots_buffer_(NULL),
54 encountered_weak_collections_(NULL),
55 have_code_to_deoptimize_(false) { }
58 class VerifyMarkingVisitor: public ObjectVisitor {
60 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
62 void VisitPointers(Object** start, Object** end) {
63 for (Object** current = start; current < end; current++) {
64 if ((*current)->IsHeapObject()) {
65 HeapObject* object = HeapObject::cast(*current);
66 CHECK(heap_->mark_compact_collector()->IsMarked(object));
71 void VisitEmbeddedPointer(RelocInfo* rinfo) {
72 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
73 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
74 Object* p = rinfo->target_object();
79 void VisitCell(RelocInfo* rinfo) {
80 Code* code = rinfo->host();
81 ASSERT(rinfo->rmode() == RelocInfo::CELL);
82 if (!code->IsWeakObject(rinfo->target_cell())) {
83 ObjectVisitor::VisitCell(rinfo);
92 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
93 VerifyMarkingVisitor visitor(heap);
95 Address next_object_must_be_here_or_later = bottom;
97 for (Address current = bottom;
99 current += kPointerSize) {
100 object = HeapObject::FromAddress(current);
101 if (MarkCompactCollector::IsMarked(object)) {
102 CHECK(current >= next_object_must_be_here_or_later);
103 object->Iterate(&visitor);
104 next_object_must_be_here_or_later = current + object->Size();
110 static void VerifyMarking(NewSpace* space) {
111 Address end = space->top();
112 NewSpacePageIterator it(space->bottom(), end);
113 // The bottom position is at the start of its page. Allows us to use
114 // page->area_start() as start of range on all pages.
115 CHECK_EQ(space->bottom(),
116 NewSpacePage::FromAddress(space->bottom())->area_start());
117 while (it.has_next()) {
118 NewSpacePage* page = it.next();
119 Address limit = it.has_next() ? page->area_end() : end;
120 CHECK(limit == end || !page->Contains(end));
121 VerifyMarking(space->heap(), page->area_start(), limit);
126 static void VerifyMarking(PagedSpace* space) {
127 PageIterator it(space);
129 while (it.has_next()) {
131 VerifyMarking(space->heap(), p->area_start(), p->area_end());
136 static void VerifyMarking(Heap* heap) {
137 VerifyMarking(heap->old_pointer_space());
138 VerifyMarking(heap->old_data_space());
139 VerifyMarking(heap->code_space());
140 VerifyMarking(heap->cell_space());
141 VerifyMarking(heap->property_cell_space());
142 VerifyMarking(heap->map_space());
143 VerifyMarking(heap->new_space());
145 VerifyMarkingVisitor visitor(heap);
147 LargeObjectIterator it(heap->lo_space());
148 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
149 if (MarkCompactCollector::IsMarked(obj)) {
150 obj->Iterate(&visitor);
154 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
158 class VerifyEvacuationVisitor: public ObjectVisitor {
160 void VisitPointers(Object** start, Object** end) {
161 for (Object** current = start; current < end; current++) {
162 if ((*current)->IsHeapObject()) {
163 HeapObject* object = HeapObject::cast(*current);
164 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
171 static void VerifyEvacuation(Address bottom, Address top) {
172 VerifyEvacuationVisitor visitor;
174 Address next_object_must_be_here_or_later = bottom;
176 for (Address current = bottom;
178 current += kPointerSize) {
179 object = HeapObject::FromAddress(current);
180 if (MarkCompactCollector::IsMarked(object)) {
181 CHECK(current >= next_object_must_be_here_or_later);
182 object->Iterate(&visitor);
183 next_object_must_be_here_or_later = current + object->Size();
189 static void VerifyEvacuation(NewSpace* space) {
190 NewSpacePageIterator it(space->bottom(), space->top());
191 VerifyEvacuationVisitor visitor;
193 while (it.has_next()) {
194 NewSpacePage* page = it.next();
195 Address current = page->area_start();
196 Address limit = it.has_next() ? page->area_end() : space->top();
197 CHECK(limit == space->top() || !page->Contains(space->top()));
198 while (current < limit) {
199 HeapObject* object = HeapObject::FromAddress(current);
200 object->Iterate(&visitor);
201 current += object->Size();
207 static void VerifyEvacuation(PagedSpace* space) {
208 // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
210 if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
211 space->was_swept_conservatively()) return;
212 PageIterator it(space);
214 while (it.has_next()) {
216 if (p->IsEvacuationCandidate()) continue;
217 VerifyEvacuation(p->area_start(), p->area_end());
222 static void VerifyEvacuation(Heap* heap) {
223 VerifyEvacuation(heap->old_pointer_space());
224 VerifyEvacuation(heap->old_data_space());
225 VerifyEvacuation(heap->code_space());
226 VerifyEvacuation(heap->cell_space());
227 VerifyEvacuation(heap->property_cell_space());
228 VerifyEvacuation(heap->map_space());
229 VerifyEvacuation(heap->new_space());
231 VerifyEvacuationVisitor visitor;
232 heap->IterateStrongRoots(&visitor, VISIT_ALL);
234 #endif // VERIFY_HEAP
238 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
240 VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
242 void VisitPointers(Object** start, Object** end) {
243 for (Object** current = start; current < end; current++) {
244 if ((*current)->IsHeapObject()) {
245 HeapObject* object = HeapObject::cast(*current);
246 if (object->IsString()) continue;
247 switch (object->map()->instance_type()) {
248 case JS_FUNCTION_TYPE:
249 CheckContext(JSFunction::cast(object)->context());
251 case JS_GLOBAL_PROXY_TYPE:
252 CheckContext(JSGlobalProxy::cast(object)->native_context());
254 case JS_GLOBAL_OBJECT_TYPE:
255 case JS_BUILTINS_OBJECT_TYPE:
256 CheckContext(GlobalObject::cast(object)->native_context());
262 VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
265 VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
266 VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
268 case FIXED_ARRAY_TYPE:
269 if (object->IsContext()) {
270 CheckContext(object);
272 FixedArray* array = FixedArray::cast(object);
273 int length = array->length();
274 // Set array length to zero to prevent cycles while iterating
275 // over array bodies, this is easier than intrusive marking.
276 array->set_length(0);
278 FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
279 array->set_length(length);
285 case TYPE_FEEDBACK_INFO_TYPE:
286 object->Iterate(this);
288 case DECLARED_ACCESSOR_INFO_TYPE:
289 case EXECUTABLE_ACCESSOR_INFO_TYPE:
290 case BYTE_ARRAY_TYPE:
291 case CALL_HANDLER_INFO_TYPE:
293 case FIXED_DOUBLE_ARRAY_TYPE:
294 case HEAP_NUMBER_TYPE:
298 case INTERCEPTOR_INFO_TYPE:
301 case SHARED_FUNCTION_INFO_TYPE:
311 void CheckContext(Object* context) {
312 if (!context->IsContext()) return;
313 Context* native_context = Context::cast(context)->native_context();
314 if (current_native_context_ == NULL) {
315 current_native_context_ = native_context;
317 CHECK_EQ(current_native_context_, native_context);
321 Context* current_native_context_;
325 static void VerifyNativeContextSeparation(Heap* heap) {
326 HeapObjectIterator it(heap->code_space());
328 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
329 VerifyNativeContextSeparationVisitor visitor;
330 Code::cast(object)->CodeIterateBody(&visitor);
336 void MarkCompactCollector::SetUp() {
337 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
338 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
342 void MarkCompactCollector::TearDown() {
347 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
348 p->MarkEvacuationCandidate();
349 evacuation_candidates_.Add(p);
353 static void TraceFragmentation(PagedSpace* space) {
354 int number_of_pages = space->CountTotalPages();
355 intptr_t reserved = (number_of_pages * space->AreaSize());
356 intptr_t free = reserved - space->SizeOfObjects();
357 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
358 AllocationSpaceName(space->identity()),
360 static_cast<int>(free),
361 static_cast<double>(free) * 100 / reserved);
365 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
367 ASSERT(evacuation_candidates_.length() == 0);
369 #ifdef ENABLE_GDB_JIT_INTERFACE
370 // If GDBJIT interface is active disable compaction.
371 if (FLAG_gdbjit) return false;
374 CollectEvacuationCandidates(heap()->old_pointer_space());
375 CollectEvacuationCandidates(heap()->old_data_space());
377 if (FLAG_compact_code_space &&
378 (mode == NON_INCREMENTAL_COMPACTION ||
379 FLAG_incremental_code_compaction)) {
380 CollectEvacuationCandidates(heap()->code_space());
381 } else if (FLAG_trace_fragmentation) {
382 TraceFragmentation(heap()->code_space());
385 if (FLAG_trace_fragmentation) {
386 TraceFragmentation(heap()->map_space());
387 TraceFragmentation(heap()->cell_space());
388 TraceFragmentation(heap()->property_cell_space());
391 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
392 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
393 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
395 compacting_ = evacuation_candidates_.length() > 0;
402 void MarkCompactCollector::CollectGarbage() {
403 // Make sure that Prepare() has been called. The individual steps below will
404 // update the state as they proceed.
405 ASSERT(state_ == PREPARE_GC);
406 ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
409 ASSERT(heap_->incremental_marking()->IsStopped());
411 if (FLAG_collect_maps) ClearNonLiveReferences();
413 ClearWeakCollections();
416 if (FLAG_verify_heap) {
417 VerifyMarking(heap_);
423 if (!FLAG_collect_maps) ReattachInitialMaps();
426 if (FLAG_verify_native_context_separation) {
427 VerifyNativeContextSeparation(heap_);
432 if (heap()->weak_embedded_objects_verification_enabled()) {
433 VerifyWeakEmbeddedObjectsInCode();
435 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
436 VerifyOmittedMapChecks();
442 if (marking_parity_ == EVEN_MARKING_PARITY) {
443 marking_parity_ = ODD_MARKING_PARITY;
445 ASSERT(marking_parity_ == ODD_MARKING_PARITY);
446 marking_parity_ = EVEN_MARKING_PARITY;
454 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
455 PageIterator it(space);
457 while (it.has_next()) {
459 CHECK(p->markbits()->IsClean());
460 CHECK_EQ(0, p->LiveBytes());
465 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
466 NewSpacePageIterator it(space->bottom(), space->top());
468 while (it.has_next()) {
469 NewSpacePage* p = it.next();
470 CHECK(p->markbits()->IsClean());
471 CHECK_EQ(0, p->LiveBytes());
476 void MarkCompactCollector::VerifyMarkbitsAreClean() {
477 VerifyMarkbitsAreClean(heap_->old_pointer_space());
478 VerifyMarkbitsAreClean(heap_->old_data_space());
479 VerifyMarkbitsAreClean(heap_->code_space());
480 VerifyMarkbitsAreClean(heap_->cell_space());
481 VerifyMarkbitsAreClean(heap_->property_cell_space());
482 VerifyMarkbitsAreClean(heap_->map_space());
483 VerifyMarkbitsAreClean(heap_->new_space());
485 LargeObjectIterator it(heap_->lo_space());
486 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
487 MarkBit mark_bit = Marking::MarkBitFrom(obj);
488 CHECK(Marking::IsWhite(mark_bit));
489 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
494 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
495 HeapObjectIterator code_iterator(heap()->code_space());
496 for (HeapObject* obj = code_iterator.Next();
498 obj = code_iterator.Next()) {
499 Code* code = Code::cast(obj);
500 if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
501 if (WillBeDeoptimized(code)) continue;
502 code->VerifyEmbeddedObjectsDependency();
507 void MarkCompactCollector::VerifyOmittedMapChecks() {
508 HeapObjectIterator iterator(heap()->map_space());
509 for (HeapObject* obj = iterator.Next();
511 obj = iterator.Next()) {
512 Map* map = Map::cast(obj);
513 map->VerifyOmittedMapChecks();
516 #endif // VERIFY_HEAP
519 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
520 PageIterator it(space);
522 while (it.has_next()) {
523 Bitmap::Clear(it.next());
528 static void ClearMarkbitsInNewSpace(NewSpace* space) {
529 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
531 while (it.has_next()) {
532 Bitmap::Clear(it.next());
537 void MarkCompactCollector::ClearMarkbits() {
538 ClearMarkbitsInPagedSpace(heap_->code_space());
539 ClearMarkbitsInPagedSpace(heap_->map_space());
540 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
541 ClearMarkbitsInPagedSpace(heap_->old_data_space());
542 ClearMarkbitsInPagedSpace(heap_->cell_space());
543 ClearMarkbitsInPagedSpace(heap_->property_cell_space());
544 ClearMarkbitsInNewSpace(heap_->new_space());
546 LargeObjectIterator it(heap_->lo_space());
547 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
548 MarkBit mark_bit = Marking::MarkBitFrom(obj);
550 mark_bit.Next().Clear();
551 Page::FromAddress(obj->address())->ResetProgressBar();
552 Page::FromAddress(obj->address())->ResetLiveBytes();
557 class MarkCompactCollector::SweeperTask : public v8::Task {
559 SweeperTask(Heap* heap, PagedSpace* space)
560 : heap_(heap), space_(space) {}
562 virtual ~SweeperTask() {}
565 // v8::Task overrides.
566 virtual void Run() V8_OVERRIDE {
567 heap_->mark_compact_collector()->SweepInParallel(space_);
568 heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
574 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
578 void MarkCompactCollector::StartSweeperThreads() {
579 ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
580 ASSERT(free_list_old_data_space_.get()->IsEmpty());
581 sweeping_pending_ = true;
582 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
583 isolate()->sweeper_threads()[i]->StartSweeping();
585 if (FLAG_job_based_sweeping) {
586 V8::GetCurrentPlatform()->CallOnBackgroundThread(
587 new SweeperTask(heap(), heap()->old_data_space()),
588 v8::Platform::kShortRunningTask);
589 V8::GetCurrentPlatform()->CallOnBackgroundThread(
590 new SweeperTask(heap(), heap()->old_pointer_space()),
591 v8::Platform::kShortRunningTask);
596 void MarkCompactCollector::WaitUntilSweepingCompleted() {
597 ASSERT(sweeping_pending_ == true);
598 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
599 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
601 if (FLAG_job_based_sweeping) {
602 // Wait twice for both jobs.
603 pending_sweeper_jobs_semaphore_.Wait();
604 pending_sweeper_jobs_semaphore_.Wait();
606 ParallelSweepSpacesComplete();
607 sweeping_pending_ = false;
608 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
609 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
610 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
611 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
615 bool MarkCompactCollector::IsSweepingCompleted() {
616 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
617 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
621 if (FLAG_job_based_sweeping) {
622 if (!pending_sweeper_jobs_semaphore_.WaitFor(TimeDelta::FromSeconds(0))) {
625 pending_sweeper_jobs_semaphore_.Signal();
631 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
634 if (space == heap()->old_pointer_space()) {
635 free_list = free_list_old_pointer_space_.get();
636 } else if (space == heap()->old_data_space()) {
637 free_list = free_list_old_data_space_.get();
639 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
640 // to only refill them for old data and pointer spaces.
644 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
645 space->AddToAccountingStats(freed_bytes);
646 space->DecrementUnsweptFreeBytes(freed_bytes);
650 bool MarkCompactCollector::AreSweeperThreadsActivated() {
651 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
655 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
656 return sweeping_pending_;
660 void Marking::TransferMark(Address old_start, Address new_start) {
661 // This is only used when resizing an object.
662 ASSERT(MemoryChunk::FromAddress(old_start) ==
663 MemoryChunk::FromAddress(new_start));
665 if (!heap_->incremental_marking()->IsMarking()) return;
667 // If the mark doesn't move, we don't check the color of the object.
668 // It doesn't matter whether the object is black, since it hasn't changed
669 // size, so the adjustment to the live data count will be zero anyway.
670 if (old_start == new_start) return;
672 MarkBit new_mark_bit = MarkBitFrom(new_start);
673 MarkBit old_mark_bit = MarkBitFrom(old_start);
676 ObjectColor old_color = Color(old_mark_bit);
679 if (Marking::IsBlack(old_mark_bit)) {
680 old_mark_bit.Clear();
681 ASSERT(IsWhite(old_mark_bit));
682 Marking::MarkBlack(new_mark_bit);
684 } else if (Marking::IsGrey(old_mark_bit)) {
685 old_mark_bit.Clear();
686 old_mark_bit.Next().Clear();
687 ASSERT(IsWhite(old_mark_bit));
688 heap_->incremental_marking()->WhiteToGreyAndPush(
689 HeapObject::FromAddress(new_start), new_mark_bit);
690 heap_->incremental_marking()->RestartIfNotMarking();
694 ObjectColor new_color = Color(new_mark_bit);
695 ASSERT(new_color == old_color);
700 const char* AllocationSpaceName(AllocationSpace space) {
702 case NEW_SPACE: return "NEW_SPACE";
703 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
704 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
705 case CODE_SPACE: return "CODE_SPACE";
706 case MAP_SPACE: return "MAP_SPACE";
707 case CELL_SPACE: return "CELL_SPACE";
708 case PROPERTY_CELL_SPACE:
709 return "PROPERTY_CELL_SPACE";
710 case LO_SPACE: return "LO_SPACE";
719 // Returns zero for pages that have so little fragmentation that it is not
720 // worth defragmenting them. Otherwise a positive integer that gives an
721 // estimate of fragmentation on an arbitrary scale.
722 static int FreeListFragmentation(PagedSpace* space, Page* p) {
723 // If page was not swept then there are no free list items on it.
724 if (!p->WasSwept()) {
725 if (FLAG_trace_fragmentation) {
726 PrintF("%p [%s]: %d bytes live (unswept)\n",
727 reinterpret_cast<void*>(p),
728 AllocationSpaceName(space->identity()),
734 PagedSpace::SizeStats sizes;
735 space->ObtainFreeListStatistics(p, &sizes);
738 intptr_t ratio_threshold;
739 intptr_t area_size = space->AreaSize();
740 if (space->identity() == CODE_SPACE) {
741 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
743 ratio_threshold = 10;
745 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
747 ratio_threshold = 15;
750 if (FLAG_trace_fragmentation) {
751 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
752 reinterpret_cast<void*>(p),
753 AllocationSpaceName(space->identity()),
754 static_cast<int>(sizes.small_size_),
755 static_cast<double>(sizes.small_size_ * 100) /
757 static_cast<int>(sizes.medium_size_),
758 static_cast<double>(sizes.medium_size_ * 100) /
760 static_cast<int>(sizes.large_size_),
761 static_cast<double>(sizes.large_size_ * 100) /
763 static_cast<int>(sizes.huge_size_),
764 static_cast<double>(sizes.huge_size_ * 100) /
766 (ratio > ratio_threshold) ? "[fragmented]" : "");
769 if (FLAG_always_compact && sizes.Total() != area_size) {
773 if (ratio <= ratio_threshold) return 0; // Not fragmented.
775 return static_cast<int>(ratio - ratio_threshold);
779 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
780 ASSERT(space->identity() == OLD_POINTER_SPACE ||
781 space->identity() == OLD_DATA_SPACE ||
782 space->identity() == CODE_SPACE);
784 static const int kMaxMaxEvacuationCandidates = 1000;
785 int number_of_pages = space->CountTotalPages();
786 int max_evacuation_candidates =
787 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
789 if (FLAG_stress_compaction || FLAG_always_compact) {
790 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
795 Candidate() : fragmentation_(0), page_(NULL) { }
796 Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
798 int fragmentation() { return fragmentation_; }
799 Page* page() { return page_; }
806 enum CompactionMode {
808 REDUCE_MEMORY_FOOTPRINT
811 CompactionMode mode = COMPACT_FREE_LISTS;
813 intptr_t reserved = number_of_pages * space->AreaSize();
814 intptr_t over_reserved = reserved - space->SizeOfObjects();
815 static const intptr_t kFreenessThreshold = 50;
817 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
818 // If reduction of memory footprint was requested, we are aggressive
819 // about choosing pages to free. We expect that half-empty pages
820 // are easier to compact so slightly bump the limit.
821 mode = REDUCE_MEMORY_FOOTPRINT;
822 max_evacuation_candidates += 2;
826 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
827 // If over-usage is very high (more than a third of the space), we
828 // try to free all mostly empty pages. We expect that almost empty
829 // pages are even easier to compact so bump the limit even more.
830 mode = REDUCE_MEMORY_FOOTPRINT;
831 max_evacuation_candidates *= 2;
834 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
835 PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
836 "evacuation candidate limit: %d\n",
837 static_cast<double>(over_reserved) / MB,
838 static_cast<double>(reserved) / MB,
839 static_cast<int>(kFreenessThreshold),
840 max_evacuation_candidates);
843 intptr_t estimated_release = 0;
845 Candidate candidates[kMaxMaxEvacuationCandidates];
847 max_evacuation_candidates =
848 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
851 int fragmentation = 0;
852 Candidate* least = NULL;
854 PageIterator it(space);
855 if (it.has_next()) it.next(); // Never compact the first page.
857 while (it.has_next()) {
859 p->ClearEvacuationCandidate();
861 if (FLAG_stress_compaction) {
862 unsigned int counter = space->heap()->ms_count();
863 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
864 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
865 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
866 // Don't try to release too many pages.
867 if (estimated_release >= over_reserved) {
871 intptr_t free_bytes = 0;
873 if (!p->WasSwept()) {
874 free_bytes = (p->area_size() - p->LiveBytes());
876 PagedSpace::SizeStats sizes;
877 space->ObtainFreeListStatistics(p, &sizes);
878 free_bytes = sizes.Total();
881 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
883 if (free_pct >= kFreenessThreshold) {
884 estimated_release += free_bytes;
885 fragmentation = free_pct;
890 if (FLAG_trace_fragmentation) {
891 PrintF("%p [%s]: %d (%.2f%%) free %s\n",
892 reinterpret_cast<void*>(p),
893 AllocationSpaceName(space->identity()),
894 static_cast<int>(free_bytes),
895 static_cast<double>(free_bytes * 100) / p->area_size(),
896 (fragmentation > 0) ? "[fragmented]" : "");
899 fragmentation = FreeListFragmentation(space, p);
902 if (fragmentation != 0) {
903 if (count < max_evacuation_candidates) {
904 candidates[count++] = Candidate(fragmentation, p);
907 for (int i = 0; i < max_evacuation_candidates; i++) {
909 candidates[i].fragmentation() < least->fragmentation()) {
910 least = candidates + i;
914 if (least->fragmentation() < fragmentation) {
915 *least = Candidate(fragmentation, p);
922 for (int i = 0; i < count; i++) {
923 AddEvacuationCandidate(candidates[i].page());
926 if (count > 0 && FLAG_trace_fragmentation) {
927 PrintF("Collected %d evacuation candidates for space %s\n",
929 AllocationSpaceName(space->identity()));
934 void MarkCompactCollector::AbortCompaction() {
936 int npages = evacuation_candidates_.length();
937 for (int i = 0; i < npages; i++) {
938 Page* p = evacuation_candidates_[i];
939 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
940 p->ClearEvacuationCandidate();
941 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
944 evacuation_candidates_.Rewind(0);
945 invalidated_code_.Rewind(0);
947 ASSERT_EQ(0, evacuation_candidates_.length());
951 void MarkCompactCollector::Prepare(GCTracer* tracer) {
952 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
954 // Rather than passing the tracer around we stash it in a static member
959 ASSERT(state_ == IDLE);
963 ASSERT(!FLAG_never_compact || !FLAG_always_compact);
965 if (IsConcurrentSweepingInProgress()) {
966 // Instead of waiting we could also abort the sweeper threads here.
967 WaitUntilSweepingCompleted();
970 // Clear marking bits if incremental marking is aborted.
971 if (was_marked_incrementally_ && abort_incremental_marking_) {
972 heap()->incremental_marking()->Abort();
975 was_marked_incrementally_ = false;
978 // Don't start compaction if we are in the middle of incremental
979 // marking cycle. We did not collect any slots.
980 if (!FLAG_never_compact && !was_marked_incrementally_) {
981 StartCompaction(NON_INCREMENTAL_COMPACTION);
984 PagedSpaces spaces(heap());
985 for (PagedSpace* space = spaces.next();
987 space = spaces.next()) {
988 space->PrepareForMarkCompact();
992 if (!was_marked_incrementally_ && FLAG_verify_heap) {
993 VerifyMarkbitsAreClean();
999 void MarkCompactCollector::Finish() {
1001 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
1004 // The stub cache is not traversed during GC; clear the cache to
1005 // force lazy re-initialization of it. This must be done after the
1006 // GC, because it relies on the new address of certain old space
1007 // objects (empty string, illegal builtin).
1008 isolate()->stub_cache()->Clear();
1010 if (have_code_to_deoptimize_) {
1011 // Some code objects were marked for deoptimization during the GC.
1012 Deoptimizer::DeoptimizeMarkedCode(isolate());
1013 have_code_to_deoptimize_ = false;
1018 // -------------------------------------------------------------------------
1019 // Phase 1: tracing and marking live objects.
1020 // before: all objects are in normal state.
1021 // after: a live object's map pointer is marked as '00'.
1023 // Marking all live objects in the heap as part of mark-sweep or mark-compact
1024 // collection. Before marking, all objects are in their normal state. After
1025 // marking, live objects' map pointers are marked indicating that the object
1026 // has been found reachable.
1028 // The marking algorithm is a (mostly) depth-first (because of possible stack
1029 // overflow) traversal of the graph of objects reachable from the roots. It
1030 // uses an explicit stack of pointers rather than recursion. The young
1031 // generation's inactive ('from') space is used as a marking stack. The
1032 // objects in the marking stack are the ones that have been reached and marked
1033 // but their children have not yet been visited.
1035 // The marking stack can overflow during traversal. In that case, we set an
1036 // overflow flag. When the overflow flag is set, we continue marking objects
1037 // reachable from the objects on the marking stack, but no longer push them on
1038 // the marking stack. Instead, we mark them as both marked and overflowed.
1039 // When the stack is in the overflowed state, objects marked as overflowed
1040 // have been reached and marked but their children have not been visited yet.
1041 // After emptying the marking stack, we clear the overflow flag and traverse
1042 // the heap looking for objects marked as overflowed, push them on the stack,
1043 // and continue with marking. This process repeats until all reachable
1044 // objects have been marked.
1046 void CodeFlusher::ProcessJSFunctionCandidates() {
1047 Code* lazy_compile =
1048 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1049 Object* undefined = isolate_->heap()->undefined_value();
1051 JSFunction* candidate = jsfunction_candidates_head_;
1052 JSFunction* next_candidate;
1053 while (candidate != NULL) {
1054 next_candidate = GetNextCandidate(candidate);
1055 ClearNextCandidate(candidate, undefined);
1057 SharedFunctionInfo* shared = candidate->shared();
1059 Code* code = shared->code();
1060 MarkBit code_mark = Marking::MarkBitFrom(code);
1061 if (!code_mark.Get()) {
1062 if (FLAG_trace_code_flushing && shared->is_compiled()) {
1063 PrintF("[code-flushing clears: ");
1064 shared->ShortPrint();
1065 PrintF(" - age: %d]\n", code->GetAge());
1067 shared->set_code(lazy_compile);
1068 candidate->set_code(lazy_compile);
1070 candidate->set_code(code);
1073 // We are in the middle of a GC cycle so the write barrier in the code
1074 // setter did not record the slot update and we have to do that manually.
1075 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1076 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1077 isolate_->heap()->mark_compact_collector()->
1078 RecordCodeEntrySlot(slot, target);
1080 Object** shared_code_slot =
1081 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1082 isolate_->heap()->mark_compact_collector()->
1083 RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
1085 candidate = next_candidate;
1088 jsfunction_candidates_head_ = NULL;
1092 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1093 Code* lazy_compile =
1094 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1096 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1097 SharedFunctionInfo* next_candidate;
1098 while (candidate != NULL) {
1099 next_candidate = GetNextCandidate(candidate);
1100 ClearNextCandidate(candidate);
1102 Code* code = candidate->code();
1103 MarkBit code_mark = Marking::MarkBitFrom(code);
1104 if (!code_mark.Get()) {
1105 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1106 PrintF("[code-flushing clears: ");
1107 candidate->ShortPrint();
1108 PrintF(" - age: %d]\n", code->GetAge());
1110 candidate->set_code(lazy_compile);
1113 Object** code_slot =
1114 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1115 isolate_->heap()->mark_compact_collector()->
1116 RecordSlot(code_slot, code_slot, *code_slot);
1118 candidate = next_candidate;
1121 shared_function_info_candidates_head_ = NULL;
1125 void CodeFlusher::ProcessOptimizedCodeMaps() {
1126 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1128 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1129 SharedFunctionInfo* next_holder;
1131 while (holder != NULL) {
1132 next_holder = GetNextCodeMap(holder);
1133 ClearNextCodeMap(holder);
1135 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1136 int new_length = SharedFunctionInfo::kEntriesStart;
1137 int old_length = code_map->length();
1138 for (int i = SharedFunctionInfo::kEntriesStart;
1140 i += SharedFunctionInfo::kEntryLength) {
1142 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1143 if (!Marking::MarkBitFrom(code).Get()) continue;
1145 // Move every slot in the entry.
1146 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1147 int dst_index = new_length++;
1148 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1149 Object* object = code_map->get(i + j);
1150 code_map->set(dst_index, object);
1151 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1152 ASSERT(object->IsSmi());
1154 ASSERT(Marking::IsBlack(
1155 Marking::MarkBitFrom(HeapObject::cast(*slot))));
1156 isolate_->heap()->mark_compact_collector()->
1157 RecordSlot(slot, slot, *slot);
1162 // Trim the optimized code map if entries have been removed.
1163 if (new_length < old_length) {
1164 holder->TrimOptimizedCodeMap(old_length - new_length);
1167 holder = next_holder;
1170 optimized_code_map_holder_head_ = NULL;
1174 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1175 // Make sure previous flushing decisions are revisited.
1176 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1178 if (FLAG_trace_code_flushing) {
1179 PrintF("[code-flushing abandons function-info: ");
1180 shared_info->ShortPrint();
1184 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1185 SharedFunctionInfo* next_candidate;
1186 if (candidate == shared_info) {
1187 next_candidate = GetNextCandidate(shared_info);
1188 shared_function_info_candidates_head_ = next_candidate;
1189 ClearNextCandidate(shared_info);
1191 while (candidate != NULL) {
1192 next_candidate = GetNextCandidate(candidate);
1194 if (next_candidate == shared_info) {
1195 next_candidate = GetNextCandidate(shared_info);
1196 SetNextCandidate(candidate, next_candidate);
1197 ClearNextCandidate(shared_info);
1201 candidate = next_candidate;
1207 void CodeFlusher::EvictCandidate(JSFunction* function) {
1208 ASSERT(!function->next_function_link()->IsUndefined());
1209 Object* undefined = isolate_->heap()->undefined_value();
1211 // Make sure previous flushing decisions are revisited.
1212 isolate_->heap()->incremental_marking()->RecordWrites(function);
1213 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1215 if (FLAG_trace_code_flushing) {
1216 PrintF("[code-flushing abandons closure: ");
1217 function->shared()->ShortPrint();
1221 JSFunction* candidate = jsfunction_candidates_head_;
1222 JSFunction* next_candidate;
1223 if (candidate == function) {
1224 next_candidate = GetNextCandidate(function);
1225 jsfunction_candidates_head_ = next_candidate;
1226 ClearNextCandidate(function, undefined);
1228 while (candidate != NULL) {
1229 next_candidate = GetNextCandidate(candidate);
1231 if (next_candidate == function) {
1232 next_candidate = GetNextCandidate(function);
1233 SetNextCandidate(candidate, next_candidate);
1234 ClearNextCandidate(function, undefined);
1238 candidate = next_candidate;
1244 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1245 ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1246 get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1248 // Make sure previous flushing decisions are revisited.
1249 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1251 if (FLAG_trace_code_flushing) {
1252 PrintF("[code-flushing abandons code-map: ");
1253 code_map_holder->ShortPrint();
1257 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1258 SharedFunctionInfo* next_holder;
1259 if (holder == code_map_holder) {
1260 next_holder = GetNextCodeMap(code_map_holder);
1261 optimized_code_map_holder_head_ = next_holder;
1262 ClearNextCodeMap(code_map_holder);
1264 while (holder != NULL) {
1265 next_holder = GetNextCodeMap(holder);
1267 if (next_holder == code_map_holder) {
1268 next_holder = GetNextCodeMap(code_map_holder);
1269 SetNextCodeMap(holder, next_holder);
1270 ClearNextCodeMap(code_map_holder);
1274 holder = next_holder;
1280 void CodeFlusher::EvictJSFunctionCandidates() {
1281 JSFunction* candidate = jsfunction_candidates_head_;
1282 JSFunction* next_candidate;
1283 while (candidate != NULL) {
1284 next_candidate = GetNextCandidate(candidate);
1285 EvictCandidate(candidate);
1286 candidate = next_candidate;
1288 ASSERT(jsfunction_candidates_head_ == NULL);
1292 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1293 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1294 SharedFunctionInfo* next_candidate;
1295 while (candidate != NULL) {
1296 next_candidate = GetNextCandidate(candidate);
1297 EvictCandidate(candidate);
1298 candidate = next_candidate;
1300 ASSERT(shared_function_info_candidates_head_ == NULL);
1304 void CodeFlusher::EvictOptimizedCodeMaps() {
1305 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1306 SharedFunctionInfo* next_holder;
1307 while (holder != NULL) {
1308 next_holder = GetNextCodeMap(holder);
1309 EvictOptimizedCodeMap(holder);
1310 holder = next_holder;
1312 ASSERT(optimized_code_map_holder_head_ == NULL);
1316 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1317 Heap* heap = isolate_->heap();
1319 JSFunction** slot = &jsfunction_candidates_head_;
1320 JSFunction* candidate = jsfunction_candidates_head_;
1321 while (candidate != NULL) {
1322 if (heap->InFromSpace(candidate)) {
1323 v->VisitPointer(reinterpret_cast<Object**>(slot));
1325 candidate = GetNextCandidate(*slot);
1326 slot = GetNextCandidateSlot(*slot);
1331 MarkCompactCollector::~MarkCompactCollector() {
1332 if (code_flusher_ != NULL) {
1333 delete code_flusher_;
1334 code_flusher_ = NULL;
1339 static inline HeapObject* ShortCircuitConsString(Object** p) {
1340 // Optimization: If the heap object pointed to by p is a non-internalized
1341 // cons string whose right substring is HEAP->empty_string, update
1342 // it in place to its left substring. Return the updated value.
1344 // Here we assume that if we change *p, we replace it with a heap object
1345 // (i.e., the left substring of a cons string is always a heap object).
1347 // The check performed is:
1348 // object->IsConsString() && !object->IsInternalizedString() &&
1349 // (ConsString::cast(object)->second() == HEAP->empty_string())
1350 // except the maps for the object and its possible substrings might be
1352 HeapObject* object = HeapObject::cast(*p);
1353 if (!FLAG_clever_optimizations) return object;
1354 Map* map = object->map();
1355 InstanceType type = map->instance_type();
1356 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
1358 Object* second = reinterpret_cast<ConsString*>(object)->second();
1359 Heap* heap = map->GetHeap();
1360 if (second != heap->empty_string()) {
1364 // Since we don't have the object's start, it is impossible to update the
1365 // page dirty marks. Therefore, we only replace the string with its left
1366 // substring when page dirty marks do not change.
1367 Object* first = reinterpret_cast<ConsString*>(object)->first();
1368 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1371 return HeapObject::cast(first);
1375 class MarkCompactMarkingVisitor
1376 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1378 static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1379 Map* map, HeapObject* obj);
1381 static void ObjectStatsCountFixedArray(
1382 FixedArrayBase* fixed_array,
1383 FixedArraySubInstanceType fast_type,
1384 FixedArraySubInstanceType dictionary_type);
1386 template<MarkCompactMarkingVisitor::VisitorId id>
1387 class ObjectStatsTracker {
1389 static inline void Visit(Map* map, HeapObject* obj);
1392 static void Initialize();
1394 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1395 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1398 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1399 // Mark all objects pointed to in [start, end).
1400 const int kMinRangeForMarkingRecursion = 64;
1401 if (end - start >= kMinRangeForMarkingRecursion) {
1402 if (VisitUnmarkedObjects(heap, start, end)) return;
1403 // We are close to a stack overflow, so just mark the objects.
1405 MarkCompactCollector* collector = heap->mark_compact_collector();
1406 for (Object** p = start; p < end; p++) {
1407 MarkObjectByPointer(collector, start, p);
1411 // Marks the object black and pushes it on the marking stack.
1412 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1413 MarkBit mark = Marking::MarkBitFrom(object);
1414 heap->mark_compact_collector()->MarkObject(object, mark);
1417 // Marks the object black without pushing it on the marking stack.
1418 // Returns true if object needed marking and false otherwise.
1419 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1420 MarkBit mark_bit = Marking::MarkBitFrom(object);
1421 if (!mark_bit.Get()) {
1422 heap->mark_compact_collector()->SetMark(object, mark_bit);
1428 // Mark object pointed to by p.
1429 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1430 Object** anchor_slot,
1432 if (!(*p)->IsHeapObject()) return;
1433 HeapObject* object = ShortCircuitConsString(p);
1434 collector->RecordSlot(anchor_slot, p, object);
1435 MarkBit mark = Marking::MarkBitFrom(object);
1436 collector->MarkObject(object, mark);
1440 // Visit an unmarked object.
1441 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1444 ASSERT(collector->heap()->Contains(obj));
1445 ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1447 Map* map = obj->map();
1448 Heap* heap = obj->GetHeap();
1449 MarkBit mark = Marking::MarkBitFrom(obj);
1450 heap->mark_compact_collector()->SetMark(obj, mark);
1451 // Mark the map pointer and the body.
1452 MarkBit map_mark = Marking::MarkBitFrom(map);
1453 heap->mark_compact_collector()->MarkObject(map, map_mark);
1454 IterateBody(map, obj);
1457 // Visit all unmarked objects pointed to by [start, end).
1458 // Returns false if the operation fails (lack of stack space).
1459 INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1462 // Return false is we are close to the stack limit.
1463 StackLimitCheck check(heap->isolate());
1464 if (check.HasOverflowed()) return false;
1466 MarkCompactCollector* collector = heap->mark_compact_collector();
1467 // Visit the unmarked objects.
1468 for (Object** p = start; p < end; p++) {
1470 if (!o->IsHeapObject()) continue;
1471 collector->RecordSlot(start, p, o);
1472 HeapObject* obj = HeapObject::cast(o);
1473 MarkBit mark = Marking::MarkBitFrom(obj);
1474 if (mark.Get()) continue;
1475 VisitUnmarkedObject(collector, obj);
1480 INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1481 SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1482 shared->BeforeVisitingPointers();
1485 static void VisitWeakCollection(Map* map, HeapObject* object) {
1486 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1487 JSWeakCollection* weak_collection =
1488 reinterpret_cast<JSWeakCollection*>(object);
1490 // Enqueue weak map in linked list of encountered weak maps.
1491 if (weak_collection->next() == Smi::FromInt(0)) {
1492 weak_collection->set_next(collector->encountered_weak_collections());
1493 collector->set_encountered_weak_collections(weak_collection);
1496 // Skip visiting the backing hash table containing the mappings.
1497 int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1498 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1501 JSWeakCollection::BodyDescriptor::kStartOffset,
1502 JSWeakCollection::kTableOffset);
1503 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1506 JSWeakCollection::kTableOffset + kPointerSize,
1509 // Mark the backing hash table without pushing it on the marking stack.
1510 Object* table_object = weak_collection->table();
1511 if (!table_object->IsHashTable()) return;
1512 WeakHashTable* table = WeakHashTable::cast(table_object);
1513 Object** table_slot =
1514 HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1515 MarkBit table_mark = Marking::MarkBitFrom(table);
1516 collector->RecordSlot(table_slot, table_slot, table);
1517 if (!table_mark.Get()) collector->SetMark(table, table_mark);
1518 // Recording the map slot can be skipped, because maps are not compacted.
1519 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1520 ASSERT(MarkCompactCollector::IsMarked(table->map()));
1525 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1527 // Code flushing support.
1529 static const int kRegExpCodeThreshold = 5;
1531 static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1534 // Make sure that the fixed array is in fact initialized on the RegExp.
1535 // We could potentially trigger a GC when initializing the RegExp.
1536 if (HeapObject::cast(re->data())->map()->instance_type() !=
1537 FIXED_ARRAY_TYPE) return;
1539 // Make sure this is a RegExp that actually contains code.
1540 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1542 Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1543 if (!code->IsSmi() &&
1544 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1545 // Save a copy that can be reinstated if we need the code again.
1546 re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1548 // Saving a copy might create a pointer into compaction candidate
1549 // that was not observed by marker. This might happen if JSRegExp data
1550 // was marked through the compilation cache before marker reached JSRegExp
1552 FixedArray* data = FixedArray::cast(re->data());
1553 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1554 heap->mark_compact_collector()->
1555 RecordSlot(slot, slot, code);
1557 // Set a number in the 0-255 range to guarantee no smi overflow.
1558 re->SetDataAt(JSRegExp::code_index(is_ascii),
1559 Smi::FromInt(heap->sweep_generation() & 0xff));
1560 } else if (code->IsSmi()) {
1561 int value = Smi::cast(code)->value();
1562 // The regexp has not been compiled yet or there was a compilation error.
1563 if (value == JSRegExp::kUninitializedValue ||
1564 value == JSRegExp::kCompilationErrorValue) {
1568 // Check if we should flush now.
1569 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1570 re->SetDataAt(JSRegExp::code_index(is_ascii),
1571 Smi::FromInt(JSRegExp::kUninitializedValue));
1572 re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1573 Smi::FromInt(JSRegExp::kUninitializedValue));
1579 // Works by setting the current sweep_generation (as a smi) in the
1580 // code object place in the data array of the RegExp and keeps a copy
1581 // around that can be reinstated if we reuse the RegExp before flushing.
1582 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1583 // we flush the code.
1584 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1585 Heap* heap = map->GetHeap();
1586 MarkCompactCollector* collector = heap->mark_compact_collector();
1587 if (!collector->is_code_flushing_enabled()) {
1588 VisitJSRegExp(map, object);
1591 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1592 // Flush code or set age on both ASCII and two byte code.
1593 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1594 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1595 // Visit the fields of the RegExp, including the updated FixedArray.
1596 VisitJSRegExp(map, object);
1599 static VisitorDispatchTable<Callback> non_count_table_;
1603 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1604 FixedArrayBase* fixed_array,
1605 FixedArraySubInstanceType fast_type,
1606 FixedArraySubInstanceType dictionary_type) {
1607 Heap* heap = fixed_array->map()->GetHeap();
1608 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1609 fixed_array->map() != heap->fixed_double_array_map() &&
1610 fixed_array != heap->empty_fixed_array()) {
1611 if (fixed_array->IsDictionary()) {
1612 heap->RecordFixedArraySubTypeStats(dictionary_type,
1613 fixed_array->Size());
1615 heap->RecordFixedArraySubTypeStats(fast_type,
1616 fixed_array->Size());
1622 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1623 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1624 Heap* heap = map->GetHeap();
1625 int object_size = obj->Size();
1626 heap->RecordObjectStats(map->instance_type(), object_size);
1627 non_count_table_.GetVisitorById(id)(map, obj);
1628 if (obj->IsJSObject()) {
1629 JSObject* object = JSObject::cast(obj);
1630 ObjectStatsCountFixedArray(object->elements(),
1631 DICTIONARY_ELEMENTS_SUB_TYPE,
1632 FAST_ELEMENTS_SUB_TYPE);
1633 ObjectStatsCountFixedArray(object->properties(),
1634 DICTIONARY_PROPERTIES_SUB_TYPE,
1635 FAST_PROPERTIES_SUB_TYPE);
1640 template<MarkCompactMarkingVisitor::VisitorId id>
1641 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1642 Map* map, HeapObject* obj) {
1643 ObjectStatsVisitBase(id, map, obj);
1648 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1649 MarkCompactMarkingVisitor::kVisitMap> {
1651 static inline void Visit(Map* map, HeapObject* obj) {
1652 Heap* heap = map->GetHeap();
1653 Map* map_obj = Map::cast(obj);
1654 ASSERT(map->instance_type() == MAP_TYPE);
1655 DescriptorArray* array = map_obj->instance_descriptors();
1656 if (map_obj->owns_descriptors() &&
1657 array != heap->empty_descriptor_array()) {
1658 int fixed_array_size = array->Size();
1659 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1662 if (map_obj->HasTransitionArray()) {
1663 int fixed_array_size = map_obj->transitions()->Size();
1664 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1667 if (map_obj->has_code_cache()) {
1668 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1669 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1670 cache->default_cache()->Size());
1671 if (!cache->normal_type_cache()->IsUndefined()) {
1672 heap->RecordFixedArraySubTypeStats(
1673 MAP_CODE_CACHE_SUB_TYPE,
1674 FixedArray::cast(cache->normal_type_cache())->Size());
1677 ObjectStatsVisitBase(kVisitMap, map, obj);
1683 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1684 MarkCompactMarkingVisitor::kVisitCode> {
1686 static inline void Visit(Map* map, HeapObject* obj) {
1687 Heap* heap = map->GetHeap();
1688 int object_size = obj->Size();
1689 ASSERT(map->instance_type() == CODE_TYPE);
1690 Code* code_obj = Code::cast(obj);
1691 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1693 ObjectStatsVisitBase(kVisitCode, map, obj);
1699 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1700 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1702 static inline void Visit(Map* map, HeapObject* obj) {
1703 Heap* heap = map->GetHeap();
1704 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1705 if (sfi->scope_info() != heap->empty_fixed_array()) {
1706 heap->RecordFixedArraySubTypeStats(
1707 SCOPE_INFO_SUB_TYPE,
1708 FixedArray::cast(sfi->scope_info())->Size());
1710 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1716 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1717 MarkCompactMarkingVisitor::kVisitFixedArray> {
1719 static inline void Visit(Map* map, HeapObject* obj) {
1720 Heap* heap = map->GetHeap();
1721 FixedArray* fixed_array = FixedArray::cast(obj);
1722 if (fixed_array == heap->string_table()) {
1723 heap->RecordFixedArraySubTypeStats(
1724 STRING_TABLE_SUB_TYPE,
1725 fixed_array->Size());
1727 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1732 void MarkCompactMarkingVisitor::Initialize() {
1733 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1735 table_.Register(kVisitJSRegExp,
1736 &VisitRegExpAndFlushCode);
1738 if (FLAG_track_gc_object_stats) {
1739 // Copy the visitor table to make call-through possible.
1740 non_count_table_.CopyFrom(&table_);
1741 #define VISITOR_ID_COUNT_FUNCTION(id) \
1742 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1743 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1744 #undef VISITOR_ID_COUNT_FUNCTION
1749 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1750 MarkCompactMarkingVisitor::non_count_table_;
1753 class CodeMarkingVisitor : public ThreadVisitor {
1755 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1756 : collector_(collector) {}
1758 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1759 collector_->PrepareThreadForCodeFlushing(isolate, top);
1763 MarkCompactCollector* collector_;
1767 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1769 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1770 : collector_(collector) {}
1772 void VisitPointers(Object** start, Object** end) {
1773 for (Object** p = start; p < end; p++) VisitPointer(p);
1776 void VisitPointer(Object** slot) {
1777 Object* obj = *slot;
1778 if (obj->IsSharedFunctionInfo()) {
1779 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1780 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1781 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1782 collector_->MarkObject(shared->code(), code_mark);
1783 collector_->MarkObject(shared, shared_mark);
1788 MarkCompactCollector* collector_;
1792 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1793 ThreadLocalTop* top) {
1794 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1795 // Note: for the frame that has a pending lazy deoptimization
1796 // StackFrame::unchecked_code will return a non-optimized code object for
1797 // the outermost function and StackFrame::LookupCode will return
1798 // actual optimized code object.
1799 StackFrame* frame = it.frame();
1800 Code* code = frame->unchecked_code();
1801 MarkBit code_mark = Marking::MarkBitFrom(code);
1802 MarkObject(code, code_mark);
1803 if (frame->is_optimized()) {
1804 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1805 frame->LookupCode());
1811 void MarkCompactCollector::PrepareForCodeFlushing() {
1812 // Enable code flushing for non-incremental cycles.
1813 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1814 EnableCodeFlushing(!was_marked_incrementally_);
1817 // If code flushing is disabled, there is no need to prepare for it.
1818 if (!is_code_flushing_enabled()) return;
1820 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1821 // relies on it being marked before any other descriptor array.
1822 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1823 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1824 MarkObject(descriptor_array, descriptor_array_mark);
1826 // Make sure we are not referencing the code from the stack.
1827 ASSERT(this == heap()->mark_compact_collector());
1828 PrepareThreadForCodeFlushing(heap()->isolate(),
1829 heap()->isolate()->thread_local_top());
1831 // Iterate the archived stacks in all threads to check if
1832 // the code is referenced.
1833 CodeMarkingVisitor code_marking_visitor(this);
1834 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1835 &code_marking_visitor);
1837 SharedFunctionInfoMarkingVisitor visitor(this);
1838 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1839 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1841 ProcessMarkingDeque();
1845 // Visitor class for marking heap roots.
1846 class RootMarkingVisitor : public ObjectVisitor {
1848 explicit RootMarkingVisitor(Heap* heap)
1849 : collector_(heap->mark_compact_collector()) { }
1851 void VisitPointer(Object** p) {
1852 MarkObjectByPointer(p);
1855 void VisitPointers(Object** start, Object** end) {
1856 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1859 // Skip the weak next code link in a code object, which is visited in
1860 // ProcessTopOptimizedFrame.
1861 void VisitNextCodeLink(Object** p) { }
1864 void MarkObjectByPointer(Object** p) {
1865 if (!(*p)->IsHeapObject()) return;
1867 // Replace flat cons strings in place.
1868 HeapObject* object = ShortCircuitConsString(p);
1869 MarkBit mark_bit = Marking::MarkBitFrom(object);
1870 if (mark_bit.Get()) return;
1872 Map* map = object->map();
1874 collector_->SetMark(object, mark_bit);
1876 // Mark the map pointer and body, and push them on the marking stack.
1877 MarkBit map_mark = Marking::MarkBitFrom(map);
1878 collector_->MarkObject(map, map_mark);
1879 MarkCompactMarkingVisitor::IterateBody(map, object);
1881 // Mark all the objects reachable from the map and body. May leave
1882 // overflowed objects in the heap.
1883 collector_->EmptyMarkingDeque();
1886 MarkCompactCollector* collector_;
1890 // Helper class for pruning the string table.
1891 template<bool finalize_external_strings>
1892 class StringTableCleaner : public ObjectVisitor {
1894 explicit StringTableCleaner(Heap* heap)
1895 : heap_(heap), pointers_removed_(0) { }
1897 virtual void VisitPointers(Object** start, Object** end) {
1898 // Visit all HeapObject pointers in [start, end).
1899 for (Object** p = start; p < end; p++) {
1901 if (o->IsHeapObject() &&
1902 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1903 if (finalize_external_strings) {
1904 ASSERT(o->IsExternalString());
1905 heap_->FinalizeExternalString(String::cast(*p));
1907 pointers_removed_++;
1909 // Set the entry to the_hole_value (as deleted).
1910 *p = heap_->the_hole_value();
1915 int PointersRemoved() {
1916 ASSERT(!finalize_external_strings);
1917 return pointers_removed_;
1922 int pointers_removed_;
1926 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1927 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1930 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects are retained.
1932 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1934 virtual Object* RetainAs(Object* object) {
1935 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1937 } else if (object->IsAllocationSite() &&
1938 !(AllocationSite::cast(object)->IsZombie())) {
1939 // "dead" AllocationSites need to live long enough for a traversal of new
1940 // space. These sites get a one-time reprieve.
1941 AllocationSite* site = AllocationSite::cast(object);
1943 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1952 // Fill the marking stack with overflowed objects returned by the given
1953 // iterator. Stop when the marking stack is filled or the end of the space
1954 // is reached, whichever comes first.
1956 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1957 MarkingDeque* marking_deque,
1959 // The caller should ensure that the marking stack is initially not full,
1960 // so that we don't waste effort pointlessly scanning for objects.
1961 ASSERT(!marking_deque->IsFull());
1963 Map* filler_map = heap->one_pointer_filler_map();
1964 for (HeapObject* object = it->Next();
1966 object = it->Next()) {
1967 MarkBit markbit = Marking::MarkBitFrom(object);
1968 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1969 Marking::GreyToBlack(markbit);
1970 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1971 marking_deque->PushBlack(object);
1972 if (marking_deque->IsFull()) return;
1978 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
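// MarkWordToObjectStarts decodes one cell of mark bits into the word offsets
// of the object starts it contains (filling |starts|) and returns the number
// of starts found; the byte-indexed kStartTable further below drives the
// decoding.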
1981 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1983 ASSERT(!marking_deque->IsFull());
1984 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1985 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1986 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1987 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1989 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1990 Address cell_base = it.CurrentCellBase();
1991 MarkBit::CellType* cell = it.CurrentCell();
1993 const MarkBit::CellType current_cell = *cell;
1994 if (current_cell == 0) continue;
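    // A grey object has both of its mark bits set ("11"). ANDing each bit
    // with the next higher bit (borrowing the lowest bit of the next cell at
    // the cell boundary) leaves a 1 exactly at the first mark bit of every
    // grey object in this cell.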
1996 MarkBit::CellType grey_objects;
1998 const MarkBit::CellType next_cell = *(cell+1);
1999 grey_objects = current_cell &
2000 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
2002 grey_objects = current_cell & (current_cell >> 1);
2006 while (grey_objects != 0) {
2007 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2008 grey_objects >>= trailing_zeros;
2009 offset += trailing_zeros;
2010 MarkBit markbit(cell, 1 << offset, false);
2011 ASSERT(Marking::IsGrey(markbit));
2012 Marking::GreyToBlack(markbit);
2013 Address addr = cell_base + offset * kPointerSize;
2014 HeapObject* object = HeapObject::FromAddress(addr);
2015 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2016 marking_deque->PushBlack(object);
2017 if (marking_deque->IsFull()) return;
2022 grey_objects >>= (Bitmap::kBitsPerCell - 1);
2027 int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
2028 NewSpace* new_space,
2030 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
2031 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
2032 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
2033 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2035 MarkBit::CellType* cells = p->markbits()->cells();
2036 int survivors_size = 0;
2038 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
2039 Address cell_base = it.CurrentCellBase();
2040 MarkBit::CellType* cell = it.CurrentCell();
2042 MarkBit::CellType current_cell = *cell;
2043 if (current_cell == 0) continue;
2046 while (current_cell != 0) {
2047 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
2048 current_cell >>= trailing_zeros;
2049 offset += trailing_zeros;
2050 Address address = cell_base + offset * kPointerSize;
2051 HeapObject* object = HeapObject::FromAddress(address);
2053 int size = object->Size();
2054 survivors_size += size;
2056 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
2060 // Aggressively promote young survivors to the old space.
2061 if (TryPromoteObject(object, size)) {
2065 // Promotion failed. Just migrate object to another semispace.
2066 AllocationResult allocation = new_space->AllocateRaw(size);
2067 if (allocation.IsRetry()) {
2068 if (!new_space->AddFreshPage()) {
2069 // Shouldn't happen. We are sweeping linearly, and to-space
2070 // has the same number of pages as from-space, so there is always room.
2074 allocation = new_space->AllocateRaw(size);
2075 ASSERT(!allocation.IsRetry());
2077 Object* target = allocation.ToObjectChecked();
2079 MigrateObject(HeapObject::cast(target),
2086 return survivors_size;
2090 static void DiscoverGreyObjectsInSpace(Heap* heap,
2091 MarkingDeque* marking_deque,
2092 PagedSpace* space) {
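  // Precisely swept spaces can be walked object by object; conservatively
  // swept pages are scanned through their mark bitmaps instead.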
2093 if (!space->was_swept_conservatively()) {
2094 HeapObjectIterator it(space);
2095 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2097 PageIterator it(space);
2098 while (it.has_next()) {
2099 Page* p = it.next();
2100 DiscoverGreyObjectsOnPage(marking_deque, p);
2101 if (marking_deque->IsFull()) return;
2107 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2108 MarkingDeque* marking_deque) {
2109 NewSpace* space = heap->new_space();
2110 NewSpacePageIterator it(space->bottom(), space->top());
2111 while (it.has_next()) {
2112 NewSpacePage* page = it.next();
2113 DiscoverGreyObjectsOnPage(marking_deque, page);
2114 if (marking_deque->IsFull()) return;
2119 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2121 if (!o->IsHeapObject()) return false;
2122 HeapObject* heap_object = HeapObject::cast(o);
2123 MarkBit mark = Marking::MarkBitFrom(heap_object);
2128 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2131 ASSERT(o->IsHeapObject());
2132 HeapObject* heap_object = HeapObject::cast(o);
2133 MarkBit mark = Marking::MarkBitFrom(heap_object);
2138 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2139 StringTable* string_table = heap()->string_table();
2140 // Mark the string table itself.
2141 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2142 if (!string_table_mark.Get()) {
2143 // String table could have already been marked by visiting the handles list.
2144 SetMark(string_table, string_table_mark);
2146 // Explicitly mark the prefix.
2147 string_table->IteratePrefix(visitor);
2148 ProcessMarkingDeque();
2152 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2153 MarkBit mark_bit = Marking::MarkBitFrom(site);
2154 SetMark(site, mark_bit);
2158 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2159 // Mark the heap roots including global variables, stack variables,
2160 // etc., and all objects reachable from them.
2161 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2163 // Handle the string table specially.
2164 MarkStringTable(visitor);
2166 MarkWeakObjectToCodeTable();
2168 // There may be overflowed objects in the heap. Visit them now.
2169 while (marking_deque_.overflowed()) {
2170 RefillMarkingDeque();
2171 EmptyMarkingDeque();
2176 void MarkCompactCollector::MarkImplicitRefGroups() {
2177 List<ImplicitRefGroup*>* ref_groups =
2178 isolate()->global_handles()->implicit_ref_groups();
2181 for (int i = 0; i < ref_groups->length(); i++) {
2182 ImplicitRefGroup* entry = ref_groups->at(i);
2183 ASSERT(entry != NULL);
2185 if (!IsMarked(*entry->parent)) {
2186 (*ref_groups)[last++] = entry;
2190 Object*** children = entry->children;
2191 // A parent object is marked, so mark all child heap objects.
2192 for (size_t j = 0; j < entry->length; ++j) {
2193 if ((*children[j])->IsHeapObject()) {
2194 HeapObject* child = HeapObject::cast(*children[j]);
2195 MarkBit mark = Marking::MarkBitFrom(child);
2196 MarkObject(child, mark);
2200 // Once the entire group has been marked, dispose it because it's
2201 // not needed anymore.
2204 ref_groups->Rewind(last);
2208 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2209 HeapObject* weak_object_to_code_table =
2210 HeapObject::cast(heap()->weak_object_to_code_table());
2211 if (!IsMarked(weak_object_to_code_table)) {
2212 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2213 SetMark(weak_object_to_code_table, mark);
2218 // Mark all objects reachable from the objects on the marking stack.
2219 // Before: the marking stack contains zero or more heap object pointers.
2220 // After: the marking stack is empty, and all objects reachable from the
2221 // marking stack have been marked, or are overflowed in the heap.
2222 void MarkCompactCollector::EmptyMarkingDeque() {
2223 while (!marking_deque_.IsEmpty()) {
2224 HeapObject* object = marking_deque_.Pop();
2225 ASSERT(object->IsHeapObject());
2226 ASSERT(heap()->Contains(object));
2227 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2229 Map* map = object->map();
2230 MarkBit map_mark = Marking::MarkBitFrom(map);
2231 MarkObject(map, map_mark);
2233 MarkCompactMarkingVisitor::IterateBody(map, object);
2238 // Sweep the heap for overflowed objects, clear their overflow bits, and
2239 // push them on the marking stack. Stop early if the marking stack fills
2240 // before sweeping completes. If sweeping completes, there are no remaining
2241 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
2243 void MarkCompactCollector::RefillMarkingDeque() {
2244 ASSERT(marking_deque_.overflowed());
2246 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2247 if (marking_deque_.IsFull()) return;
2249 DiscoverGreyObjectsInSpace(heap(),
2251 heap()->old_pointer_space());
2252 if (marking_deque_.IsFull()) return;
2254 DiscoverGreyObjectsInSpace(heap(),
2256 heap()->old_data_space());
2257 if (marking_deque_.IsFull()) return;
2259 DiscoverGreyObjectsInSpace(heap(),
2261 heap()->code_space());
2262 if (marking_deque_.IsFull()) return;
2264 DiscoverGreyObjectsInSpace(heap(),
2266 heap()->map_space());
2267 if (marking_deque_.IsFull()) return;
2269 DiscoverGreyObjectsInSpace(heap(),
2271 heap()->cell_space());
2272 if (marking_deque_.IsFull()) return;
2274 DiscoverGreyObjectsInSpace(heap(),
2276 heap()->property_cell_space());
2277 if (marking_deque_.IsFull()) return;
2279 LargeObjectIterator lo_it(heap()->lo_space());
2280 DiscoverGreyObjectsWithIterator(heap(),
2283 if (marking_deque_.IsFull()) return;
2285 marking_deque_.ClearOverflowed();
2289 // Mark all objects reachable (transitively) from objects on the marking
2290 // stack. Before: the marking stack contains zero or more heap object
2291 // pointers. After: the marking stack is empty and there are no overflowed
2292 // objects in the heap.
2293 void MarkCompactCollector::ProcessMarkingDeque() {
2294 EmptyMarkingDeque();
2295 while (marking_deque_.overflowed()) {
2296 RefillMarkingDeque();
2297 EmptyMarkingDeque();
2302 // Mark all objects reachable (transitively) from objects on the marking
2303 // stack including references only considered in the atomic marking pause.
2304 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2305 bool work_to_do = true;
2306 ASSERT(marking_deque_.IsEmpty());
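  // Iterate to a fixed point: marking object-group children and weak
  // collection values can make further group parents and keys reachable, so
  // repeat until a full round leaves the marking deque empty.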
2307 while (work_to_do) {
2308 isolate()->global_handles()->IterateObjectGroups(
2309 visitor, &IsUnmarkedHeapObjectWithHeap);
2310 MarkImplicitRefGroups();
2311 ProcessWeakCollections();
2312 work_to_do = !marking_deque_.IsEmpty();
2313 ProcessMarkingDeque();
2318 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2319 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2320 !it.done(); it.Advance()) {
2321 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2324 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2325 Code* code = it.frame()->LookupCode();
2326 if (!code->CanDeoptAt(it.frame()->pc())) {
2327 code->CodeIterateBody(visitor);
2329 ProcessMarkingDeque();
2336 void MarkCompactCollector::MarkLiveObjects() {
2337 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2338 // The recursive GC marker detects when it is nearing stack overflow,
2339 // and switches to a different marking system. JS interrupts interfere
2340 // with the C stack limit check.
2341 PostponeInterruptsScope postpone(isolate());
2343 bool incremental_marking_overflowed = false;
2344 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2345 if (was_marked_incrementally_) {
2346 // Finalize the incremental marking and check whether we had an overflow.
2347 // Both markers use grey color to mark overflowed objects, so the
2348 // non-incremental marker can deal with them as if the overflow had
2349 // occurred during normal marking.
2350 // But the incremental marker uses a separate marking deque,
2351 // so we have to explicitly copy its overflow state.
2352 incremental_marking->Finalize();
2353 incremental_marking_overflowed =
2354 incremental_marking->marking_deque()->overflowed();
2355 incremental_marking->marking_deque()->ClearOverflowed();
2357 // Abort any pending incremental activities e.g. incremental sweeping.
2358 incremental_marking->Abort();
2362 ASSERT(state_ == PREPARE_GC);
2363 state_ = MARK_LIVE_OBJECTS;
2365 // The to space contains live objects, a page in from space is used as a marking deque.
2367 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2368 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2369 if (FLAG_force_marking_deque_overflows) {
2370 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2372 marking_deque_.Initialize(marking_deque_start,
2374 ASSERT(!marking_deque_.overflowed());
2376 if (incremental_marking_overflowed) {
2377 // There are overflowed objects left in the heap after incremental marking.
2378 marking_deque_.SetOverflowed();
2381 PrepareForCodeFlushing();
2383 if (was_marked_incrementally_) {
2384 // There is no write barrier on cells so we have to scan them now at the end
2385 // of the incremental marking.
2387 HeapObjectIterator cell_iterator(heap()->cell_space());
2389 while ((cell = cell_iterator.Next()) != NULL) {
2390 ASSERT(cell->IsCell());
2391 if (IsMarked(cell)) {
2392 int offset = Cell::kValueOffset;
2393 MarkCompactMarkingVisitor::VisitPointer(
2395 reinterpret_cast<Object**>(cell->address() + offset));
2400 HeapObjectIterator js_global_property_cell_iterator(
2401 heap()->property_cell_space());
2403 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2404 ASSERT(cell->IsPropertyCell());
2405 if (IsMarked(cell)) {
2406 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2412 RootMarkingVisitor root_visitor(heap());
2413 MarkRoots(&root_visitor);
2415 ProcessTopOptimizedFrame(&root_visitor);
2417 // The objects reachable from the roots are marked, yet unreachable
2418 // objects are unmarked. Mark objects reachable due to host
2419 // application specific logic or through Harmony weak maps.
2420 ProcessEphemeralMarking(&root_visitor);
2422 // The objects reachable from the roots, weak maps or object groups
2423 // are marked, yet unreachable objects are unmarked. Mark objects
2424 // reachable only from weak global handles.
2426 // First we identify nonlive weak handles and mark them as pending destruction.
2428 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2429 &IsUnmarkedHeapObject);
2430 // Then we mark the objects and process the transitive closure.
2431 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2432 while (marking_deque_.overflowed()) {
2433 RefillMarkingDeque();
2434 EmptyMarkingDeque();
2437 // Repeat host application specific and Harmony weak maps marking to
2438 // mark unmarked objects reachable from the weak roots.
2439 ProcessEphemeralMarking(&root_visitor);
2445 void MarkCompactCollector::AfterMarking() {
2446 // Object literal map caches reference strings (cache keys) and maps
2447 // (cache values). At this point still useful maps have already been
2448 // marked. Mark the keys for the alive values before we process the string table.
2452 // Prune the string table removing all strings only pointed to by the
2453 // string table. Cannot use string_table() here because the string table is marked.
2455 StringTable* string_table = heap()->string_table();
2456 InternalizedStringTableCleaner internalized_visitor(heap());
2457 string_table->IterateElements(&internalized_visitor);
2458 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2460 ExternalStringTableCleaner external_visitor(heap());
2461 heap()->external_string_table_.Iterate(&external_visitor);
2462 heap()->external_string_table_.CleanUp();
2464 // Process the weak references.
2465 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2466 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2468 // Remove object groups after marking phase.
2469 heap()->isolate()->global_handles()->RemoveObjectGroups();
2470 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2472 // Flush code from collected candidates.
2473 if (is_code_flushing_enabled()) {
2474 code_flusher_->ProcessCandidates();
2475 // If incremental marker does not support code flushing, we need to
2476 // disable it before incremental marking steps for next cycle.
2477 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2478 EnableCodeFlushing(false);
2482 if (FLAG_track_gc_object_stats) {
2483 heap()->CheckpointObjectStats();
2488 void MarkCompactCollector::ProcessMapCaches() {
2489 Object* raw_context = heap()->native_contexts_list_;
2490 while (raw_context != heap()->undefined_value()) {
2491 Context* context = reinterpret_cast<Context*>(raw_context);
2492 if (IsMarked(context)) {
2493 HeapObject* raw_map_cache =
2494 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2495 // A map cache may be reachable from the stack. In this case
2496 // it's already transitively marked and it's too late to clean it up.
2498 if (!IsMarked(raw_map_cache) &&
2499 raw_map_cache != heap()->undefined_value()) {
2500 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2501 int existing_elements = map_cache->NumberOfElements();
2502 int used_elements = 0;
2503 for (int i = MapCache::kElementsStartIndex;
2504 i < map_cache->length();
2505 i += MapCache::kEntrySize) {
2506 Object* raw_key = map_cache->get(i);
2507 if (raw_key == heap()->undefined_value() ||
2508 raw_key == heap()->the_hole_value()) continue;
2509 STATIC_ASSERT(MapCache::kEntrySize == 2);
2510 Object* raw_map = map_cache->get(i + 1);
2511 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2514 // Delete useless entries with unmarked maps.
2515 ASSERT(raw_map->IsMap());
2516 map_cache->set_the_hole(i);
2517 map_cache->set_the_hole(i + 1);
2520 if (used_elements == 0) {
2521 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2523 // Note: we don't actually shrink the cache here to avoid
2524 // extra complexity during GC. We rely on subsequent cache
2525 // usages (EnsureCapacity) to do this.
2526 map_cache->ElementsRemoved(existing_elements - used_elements);
2527 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2528 MarkObject(map_cache, map_cache_markbit);
2532 // Move to next element in the list.
2533 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2535 ProcessMarkingDeque();
2539 void MarkCompactCollector::ReattachInitialMaps() {
2540 HeapObjectIterator map_iterator(heap()->map_space());
2541 for (HeapObject* obj = map_iterator.Next();
2543 obj = map_iterator.Next()) {
2544 Map* map = Map::cast(obj);
2546 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2547 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2549 if (map->attached_to_shared_function_info()) {
2550 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2556 void MarkCompactCollector::ClearNonLiveReferences() {
2557 // Iterate over the map space, setting map transitions that go from
2558 // a marked map to an unmarked map to null transitions. This action
2559 // is carried out only on maps of JSObjects and related subtypes.
2560 HeapObjectIterator map_iterator(heap()->map_space());
2561 for (HeapObject* obj = map_iterator.Next();
2563 obj = map_iterator.Next()) {
2564 Map* map = Map::cast(obj);
2566 if (!map->CanTransition()) continue;
2568 MarkBit map_mark = Marking::MarkBitFrom(map);
2569 if (map_mark.Get() && map->attached_to_shared_function_info()) {
2570 // This map is used for inobject slack tracking and has been detached
2571 // from SharedFunctionInfo during the mark phase.
2572 // Since it survived the GC, reattach it now.
2573 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2576 ClearNonLivePrototypeTransitions(map);
2577 ClearNonLiveMapTransitions(map, map_mark);
2579 if (map_mark.Get()) {
2580 ClearNonLiveDependentCode(map->dependent_code());
2582 ClearDependentCode(map->dependent_code());
2583 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2587 // Iterate over property cell space, removing dependent code that is not
2588 // otherwise kept alive by strong references.
2589 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2590 for (HeapObject* cell = cell_iterator.Next();
2592 cell = cell_iterator.Next()) {
2593 if (IsMarked(cell)) {
2594 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2598 // Iterate over allocation sites, removing dependent code that is not
2599 // otherwise kept alive by strong references.
2600 Object* undefined = heap()->undefined_value();
2601 for (Object* site = heap()->allocation_sites_list();
2603 site = AllocationSite::cast(site)->weak_next()) {
2604 if (IsMarked(site)) {
2605 ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2609 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2610 WeakHashTable* table =
2611 WeakHashTable::cast(heap_->weak_object_to_code_table());
2612 uint32_t capacity = table->Capacity();
2613 for (uint32_t i = 0; i < capacity; i++) {
2614 uint32_t key_index = table->EntryToIndex(i);
2615 Object* key = table->get(key_index);
2616 if (!table->IsKey(key)) continue;
2617 uint32_t value_index = table->EntryToValueIndex(i);
2618 Object* value = table->get(value_index);
2619 if (key->IsCell() && !IsMarked(key)) {
2620 Cell* cell = Cell::cast(key);
2621 Object* object = cell->value();
2622 if (IsMarked(object)) {
2623 MarkBit mark = Marking::MarkBitFrom(cell);
2624 SetMark(cell, mark);
2625 Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2626 RecordSlot(value_slot, value_slot, *value_slot);
2629 if (IsMarked(key)) {
2630 if (!IsMarked(value)) {
2631 HeapObject* obj = HeapObject::cast(value);
2632 MarkBit mark = Marking::MarkBitFrom(obj);
2635 ClearNonLiveDependentCode(DependentCode::cast(value));
2637 ClearDependentCode(DependentCode::cast(value));
2638 table->set(key_index, heap_->the_hole_value());
2639 table->set(value_index, heap_->the_hole_value());
2646 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2647 int number_of_transitions = map->NumberOfProtoTransitions();
2648 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2650 int new_number_of_transitions = 0;
2651 const int header = Map::kProtoTransitionHeaderSize;
2652 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2653 const int map_offset = header + Map::kProtoTransitionMapOffset;
2654 const int step = Map::kProtoTransitionElementsPerEntry;
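  // Each entry occupies |step| slots after the header; surviving
  // (prototype, cached map) pairs are compacted towards the start of the array.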
2655 for (int i = 0; i < number_of_transitions; i++) {
2656 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2657 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2658 if (IsMarked(prototype) && IsMarked(cached_map)) {
2659 ASSERT(!prototype->IsUndefined());
2660 int proto_index = proto_offset + new_number_of_transitions * step;
2661 int map_index = map_offset + new_number_of_transitions * step;
2662 if (new_number_of_transitions != i) {
2663 prototype_transitions->set(
2666 UPDATE_WRITE_BARRIER);
2667 prototype_transitions->set(
2670 SKIP_WRITE_BARRIER);
2672 Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2673 RecordSlot(slot, slot, prototype);
2674 new_number_of_transitions++;
2678 if (new_number_of_transitions != number_of_transitions) {
2679 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2682 // Fill slots that became free with undefined value.
2683 for (int i = new_number_of_transitions * step;
2684 i < number_of_transitions * step;
2686 prototype_transitions->set_undefined(header + i);
2691 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2693 Object* potential_parent = map->GetBackPointer();
2694 if (!potential_parent->IsMap()) return;
2695 Map* parent = Map::cast(potential_parent);
2697 // Follow the back pointer to check whether we are dealing with a map transition
2698 // from a live map to a dead path; if so, clear the transitions of the parent.
2699 bool current_is_alive = map_mark.Get();
2700 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2701 if (!current_is_alive && parent_is_alive) {
2702 parent->ClearNonLiveTransitions(heap());
2707 void MarkCompactCollector::ClearDependentICList(Object* head) {
2708 Object* current = head;
2709 Object* undefined = heap()->undefined_value();
2710 while (current != undefined) {
2711 Code* code = Code::cast(current);
2712 if (IsMarked(code)) {
2713 ASSERT(code->is_weak_stub());
2714 IC::InvalidateMaps(code);
2716 current = code->next_code_link();
2717 code->set_next_code_link(undefined);
2722 void MarkCompactCollector::ClearDependentCode(
2723 DependentCode* entries) {
2724 DisallowHeapAllocation no_allocation;
2725 DependentCode::GroupStartIndexes starts(entries);
2726 int number_of_entries = starts.number_of_entries();
2727 if (number_of_entries == 0) return;
2728 int g = DependentCode::kWeakICGroup;
2729 if (starts.at(g) != starts.at(g + 1)) {
2730 int i = starts.at(g);
2731 ASSERT(i + 1 == starts.at(g + 1));
2732 Object* head = entries->object_at(i);
2733 ClearDependentICList(head);
2735 g = DependentCode::kWeakCodeGroup;
2736 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2737 // If the entry is compilation info then the map must be alive,
2738 // and ClearDependentCode shouldn't be called.
2739 ASSERT(entries->is_code_at(i));
2740 Code* code = entries->code_at(i);
2741 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2742 code->set_marked_for_deoptimization(true);
2743 code->InvalidateEmbeddedObjects();
2744 have_code_to_deoptimize_ = true;
2747 for (int i = 0; i < number_of_entries; i++) {
2748 entries->clear_at(i);
2753 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
2754 DependentCode* entries, int group, int start, int end, int new_start) {
2756 if (group == DependentCode::kWeakICGroup) {
2757 // Dependent weak IC stubs form a linked list and only the head is stored
2758 // in the dependent code array.
2760 ASSERT(start + 1 == end);
2761 Object* old_head = entries->object_at(start);
2762 MarkCompactWeakObjectRetainer retainer;
2763 Object* head = VisitWeakList<Code>(heap(), old_head, &retainer, true);
2764 entries->set_object_at(new_start, head);
2765 Object** slot = entries->slot_at(new_start);
2766 RecordSlot(slot, slot, head);
2767 // We do not compact this group even if the head is undefined,
2768 // more dependent ICs are likely to be added later.
2772 for (int i = start; i < end; i++) {
2773 Object* obj = entries->object_at(i);
2774 ASSERT(obj->IsCode() || IsMarked(obj));
2775 if (IsMarked(obj) &&
2776 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2777 if (new_start + survived != i) {
2778 entries->set_object_at(new_start + survived, obj);
2780 Object** slot = entries->slot_at(new_start + survived);
2781 RecordSlot(slot, slot, obj);
2786 entries->set_number_of_entries(
2787 static_cast<DependentCode::DependencyGroup>(group), survived);
2792 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2793 DisallowHeapAllocation no_allocation;
2794 DependentCode::GroupStartIndexes starts(entries);
2795 int number_of_entries = starts.number_of_entries();
2796 if (number_of_entries == 0) return;
2797 int new_number_of_entries = 0;
2798 // Go through all groups, remove dead code objects and compact.
2799 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2800 int survived = ClearNonLiveDependentCodeInGroup(
2801 entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
2802 new_number_of_entries += survived;
2804 for (int i = new_number_of_entries; i < number_of_entries; i++) {
2805 entries->clear_at(i);
2810 void MarkCompactCollector::ProcessWeakCollections() {
2811 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2812 Object* weak_collection_obj = encountered_weak_collections();
2813 while (weak_collection_obj != Smi::FromInt(0)) {
2814 ASSERT(MarkCompactCollector::IsMarked(
2815 HeapObject::cast(weak_collection_obj)));
2816 JSWeakCollection* weak_collection =
2817 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2818 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2819 Object** anchor = reinterpret_cast<Object**>(table->address());
2820 for (int i = 0; i < table->Capacity(); i++) {
2821 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2823 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2824 RecordSlot(anchor, key_slot, *key_slot);
2825 Object** value_slot =
2826 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2827 MarkCompactMarkingVisitor::MarkObjectByPointer(
2828 this, anchor, value_slot);
2831 weak_collection_obj = weak_collection->next();
2836 void MarkCompactCollector::ClearWeakCollections() {
2837 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2838 Object* weak_collection_obj = encountered_weak_collections();
2839 while (weak_collection_obj != Smi::FromInt(0)) {
2840 ASSERT(MarkCompactCollector::IsMarked(
2841 HeapObject::cast(weak_collection_obj)));
2842 JSWeakCollection* weak_collection =
2843 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2844 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2845 for (int i = 0; i < table->Capacity(); i++) {
2846 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2847 table->RemoveEntry(i);
2850 weak_collection_obj = weak_collection->next();
2851 weak_collection->set_next(Smi::FromInt(0));
2853 set_encountered_weak_collections(Smi::FromInt(0));
2857 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2860 // The first pass migrates all alive objects from one semispace to another or
2861 // promotes them to old space. The forwarding address is written directly into
2862 // the first word of the object without any encoding. If the object is dead we
2863 // write NULL as a forwarding address.
2865 // The second pass updates pointers to new space in all spaces. It is possible
2866 // to encounter pointers to dead new space objects during traversal of pointers
2867 // to new space. We should clear them to avoid encountering them during next
2868 // pointer iteration. This is an issue if the store buffer overflows and we
2869 // have to scan the entire old space, including dead objects, looking for
2870 // pointers to new space.
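// Concretely: after the first pass Memory::Address_at(old_address) holds the
// new address of a surviving object (written by MigrateObject below) and NULL
// for a dead one, which is what the pointer-update callback below relies on.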
2871 void MarkCompactCollector::MigrateObject(HeapObject* dst,
2874 AllocationSpace dest) {
2875 Address dst_addr = dst->address();
2876 Address src_addr = src->address();
2877 HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2878 if (heap_profiler->is_tracking_object_moves()) {
2879 heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
2881 ASSERT(heap()->AllowedToBeMigrated(src, dest));
2882 ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2883 if (dest == OLD_POINTER_SPACE) {
2884 Address src_slot = src_addr;
2885 Address dst_slot = dst_addr;
2886 ASSERT(IsAligned(size, kPointerSize));
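    // Copy the object word by word, marking the store buffer for pointers
    // into new space and recording slots that point into evacuation
    // candidates.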
2888 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2889 Object* value = Memory::Object_at(src_slot);
2891 Memory::Object_at(dst_slot) = value;
2893 if (heap_->InNewSpace(value)) {
2894 heap_->store_buffer()->Mark(dst_slot);
2895 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2896 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2897 &migration_slots_buffer_,
2898 reinterpret_cast<Object**>(dst_slot),
2899 SlotsBuffer::IGNORE_OVERFLOW);
2902 src_slot += kPointerSize;
2903 dst_slot += kPointerSize;
2906 if (compacting_ && dst->IsJSFunction()) {
2907 Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
2908 Address code_entry = Memory::Address_at(code_entry_slot);
2910 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2911 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2912 &migration_slots_buffer_,
2913 SlotsBuffer::CODE_ENTRY_SLOT,
2915 SlotsBuffer::IGNORE_OVERFLOW);
2917 } else if (compacting_ && dst->IsConstantPoolArray()) {
2918 ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
2919 for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
2920 Address code_entry_slot =
2921 dst_addr + constant_pool->OffsetOfElementAt(i);
2922 Address code_entry = Memory::Address_at(code_entry_slot);
2924 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2925 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2926 &migration_slots_buffer_,
2927 SlotsBuffer::CODE_ENTRY_SLOT,
2929 SlotsBuffer::IGNORE_OVERFLOW);
2933 } else if (dest == CODE_SPACE) {
2934 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2935 heap()->MoveBlock(dst_addr, src_addr, size);
2936 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2937 &migration_slots_buffer_,
2938 SlotsBuffer::RELOCATED_CODE_OBJECT,
2940 SlotsBuffer::IGNORE_OVERFLOW);
2941 Code::cast(dst)->Relocate(dst_addr - src_addr);
2943 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2944 heap()->MoveBlock(dst_addr, src_addr, size);
2946 Memory::Address_at(src_addr) = dst_addr;
2950 // Visitor for updating pointers from live objects in old spaces to new space.
2951 // It does not expect to encounter pointers to dead objects.
2952 class PointersUpdatingVisitor: public ObjectVisitor {
2954 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2956 void VisitPointer(Object** p) {
2960 void VisitPointers(Object** start, Object** end) {
2961 for (Object** p = start; p < end; p++) UpdatePointer(p);
2964 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2965 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2966 Object* target = rinfo->target_object();
2967 Object* old_target = target;
2968 VisitPointer(&target);
2969 // Avoid unnecessary changes that might unnecessarily flush the instruction cache.
2971 if (target != old_target) {
2972 rinfo->set_target_object(target);
2976 void VisitCodeTarget(RelocInfo* rinfo) {
2977 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2978 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2979 Object* old_target = target;
2980 VisitPointer(&target);
2981 if (target != old_target) {
2982 rinfo->set_target_address(Code::cast(target)->instruction_start());
2986 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2987 ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2988 Object* stub = rinfo->code_age_stub();
2989 ASSERT(stub != NULL);
2990 VisitPointer(&stub);
2991 if (stub != rinfo->code_age_stub()) {
2992 rinfo->set_code_age_stub(Code::cast(stub));
2996 void VisitDebugTarget(RelocInfo* rinfo) {
2997 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2998 rinfo->IsPatchedReturnSequence()) ||
2999 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
3000 rinfo->IsPatchedDebugBreakSlotSequence()));
3001 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
3002 VisitPointer(&target);
3003 rinfo->set_call_address(Code::cast(target)->instruction_start());
3006 static inline void UpdateSlot(Heap* heap, Object** slot) {
3007 Object* obj = *slot;
3009 if (!obj->IsHeapObject()) return;
3011 HeapObject* heap_obj = HeapObject::cast(obj);
3013 MapWord map_word = heap_obj->map_word();
3014 if (map_word.IsForwardingAddress()) {
3015 ASSERT(heap->InFromSpace(heap_obj) ||
3016 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
3017 HeapObject* target = map_word.ToForwardingAddress();
3019 ASSERT(!heap->InFromSpace(target) &&
3020 !MarkCompactCollector::IsOnEvacuationCandidate(target));
3025 inline void UpdatePointer(Object** p) {
3026 UpdateSlot(heap_, p);
3033 static void UpdatePointer(HeapObject** address, HeapObject* object) {
3034 Address new_addr = Memory::Address_at(object->address());
3036 // The new space sweep will overwrite the map word of dead objects
3037 // with NULL. In this case we do not need to transfer this entry to
3038 // the store buffer which we are rebuilding.
3039 // We perform the pointer update with a no barrier compare-and-swap. The
3040 // compare and swap may fail in the case where the pointer update tries to
3041 // update garbage memory which was concurrently accessed by the sweeper.
3042 if (new_addr != NULL) {
3043 NoBarrier_CompareAndSwap(
3044 reinterpret_cast<AtomicWord*>(address),
3045 reinterpret_cast<AtomicWord>(object),
3046 reinterpret_cast<AtomicWord>(HeapObject::FromAddress(new_addr)));
3048 // We have to zap this pointer, because the store buffer may overflow later,
3049 // and then we have to scan the entire heap and we don't want to find
3050 // spurious new space pointers in the old space.
3051 // TODO(mstarzinger): This was changed to a sentinel value to track down
3052 // rare crashes, change it back to Smi::FromInt(0) later.
3053 NoBarrier_CompareAndSwap(
3054 reinterpret_cast<AtomicWord*>(address),
3055 reinterpret_cast<AtomicWord>(object),
3056 reinterpret_cast<AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
3061 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
3063 MapWord map_word = HeapObject::cast(*p)->map_word();
3065 if (map_word.IsForwardingAddress()) {
3066 return String::cast(map_word.ToForwardingAddress());
3069 return String::cast(*p);
3073 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3075 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
3077 OldSpace* target_space = heap()->TargetSpace(object);
3079 ASSERT(target_space == heap()->old_pointer_space() ||
3080 target_space == heap()->old_data_space());
3082 AllocationResult allocation = target_space->AllocateRaw(object_size);
3083 if (allocation.To(&target)) {
3084 MigrateObject(target,
3087 target_space->identity());
3088 heap()->mark_compact_collector()->tracer()->
3089 increment_promoted_objects_size(object_size);
3097 void MarkCompactCollector::EvacuateNewSpace() {
3098 // There are soft limits in the allocation code, designed to trigger a mark
3099 // sweep collection by failing allocations. But since we are already in
3100 // a mark-sweep allocation, there is no sense in trying to trigger one.
3101 AlwaysAllocateScope scope(isolate());
3102 heap()->CheckNewSpaceExpansionCriteria();
3104 NewSpace* new_space = heap()->new_space();
3106 // Store allocation range before flipping semispaces.
3107 Address from_bottom = new_space->bottom();
3108 Address from_top = new_space->top();
3110 // Flip the semispaces. After flipping, to space is empty, from space has live objects.
3113 new_space->ResetAllocationInfo();
3115 int survivors_size = 0;
3117 // First pass: traverse all objects in inactive semispace, remove marks,
3118 // migrate live objects and write forwarding addresses. This stage puts
3119 // new entries in the store buffer and may cause some pages to be marked
3120 // scan-on-scavenge.
3121 NewSpacePageIterator it(from_bottom, from_top);
3122 while (it.has_next()) {
3123 NewSpacePage* p = it.next();
3124 survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
3127 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3128 new_space->set_age_mark(new_space->top());
3132 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3133 AlwaysAllocateScope always_allocate(isolate());
3134 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3135 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
3136 p->MarkSweptPrecisely();
3140 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3141 Address cell_base = it.CurrentCellBase();
3142 MarkBit::CellType* cell = it.CurrentCell();
3144 if (*cell == 0) continue;
3146 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3147 for (int i = 0; i < live_objects; i++) {
3148 Address object_addr = cell_base + offsets[i] * kPointerSize;
3149 HeapObject* object = HeapObject::FromAddress(object_addr);
3150 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
3152 int size = object->Size();
3154 HeapObject* target_object;
3155 AllocationResult allocation = space->AllocateRaw(size);
3156 if (!allocation.To(&target_object)) {
3157 // OS refused to give us memory.
3158 V8::FatalProcessOutOfMemory("Evacuation");
3162 MigrateObject(target_object, object, size, space->identity());
3163 ASSERT(object->map_word().IsForwardingAddress());
3166 // Clear marking bits for current cell.
3169 p->ResetLiveBytes();
3173 void MarkCompactCollector::EvacuatePages() {
3174 int npages = evacuation_candidates_.length();
3175 for (int i = 0; i < npages; i++) {
3176 Page* p = evacuation_candidates_[i];
3177 ASSERT(p->IsEvacuationCandidate() ||
3178 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3179 ASSERT(static_cast<int>(p->parallel_sweeping()) ==
3180 MemoryChunk::PARALLEL_SWEEPING_DONE);
3181 if (p->IsEvacuationCandidate()) {
3182 // During compaction we might have to request a new page.
3183 // Check that the space still has room for that.
3184 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3185 EvacuateLiveObjectsFromPage(p);
3187 // Without room for expansion evacuation is not guaranteed to succeed.
3188 // Pessimistically abandon unevacuated pages.
3189 for (int j = i; j < npages; j++) {
3190 Page* page = evacuation_candidates_[j];
3191 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3192 page->ClearEvacuationCandidate();
3193 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3202 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3204 virtual Object* RetainAs(Object* object) {
3205 if (object->IsHeapObject()) {
3206 HeapObject* heap_object = HeapObject::cast(object);
3207 MapWord map_word = heap_object->map_word();
3208 if (map_word.IsForwardingAddress()) {
3209 return map_word.ToForwardingAddress();
3217 static inline void UpdateSlot(Isolate* isolate,
3219 SlotsBuffer::SlotType slot_type,
3221 switch (slot_type) {
3222 case SlotsBuffer::CODE_TARGET_SLOT: {
3223 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3224 rinfo.Visit(isolate, v);
3227 case SlotsBuffer::CODE_ENTRY_SLOT: {
3228 v->VisitCodeEntry(addr);
3231 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3232 HeapObject* obj = HeapObject::FromAddress(addr);
3233 Code::cast(obj)->CodeIterateBody(v);
3236 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3237 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3238 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3241 case SlotsBuffer::JS_RETURN_SLOT: {
3242 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3243 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3246 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3247 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3248 rinfo.Visit(isolate, v);
3260 SWEEP_AND_VISIT_LIVE_OBJECTS
3264 enum SkipListRebuildingMode {
3270 enum FreeSpaceTreatmentMode {
3276 // Sweep a space precisely. After this has been done the space can
3277 // be iterated precisely, hitting only the live objects. Code space
3278 // is always swept precisely because we want to be able to iterate
3279 // over it. Map space is swept precisely, because it is not compacted.
3280 // Slots in live objects pointing into evacuation candidates are updated if requested.
3282 template<SweepingMode sweeping_mode,
3283 SkipListRebuildingMode skip_list_mode,
3284 FreeSpaceTreatmentMode free_space_mode>
3285 static void SweepPrecisely(PagedSpace* space,
3288 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3289 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3290 space->identity() == CODE_SPACE);
3291 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3293 double start_time = 0.0;
3294 if (FLAG_print_cumulative_gc_stat) {
3295 start_time = OS::TimeCurrentMillis();
3298 p->MarkSweptPrecisely();
3300 Address free_start = p->area_start();
3301 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3304 SkipList* skip_list = p->skip_list();
3305 int curr_region = -1;
3306 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3310 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3311 Address cell_base = it.CurrentCellBase();
3312 MarkBit::CellType* cell = it.CurrentCell();
3313 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3315 for ( ; live_objects != 0; live_objects--) {
3316 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3317 if (free_end != free_start) {
3318 if (free_space_mode == ZAP_FREE_SPACE) {
3319 memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
3321 space->Free(free_start, static_cast<int>(free_end - free_start));
3322 #ifdef ENABLE_GDB_JIT_INTERFACE
3323 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3324 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3328 HeapObject* live_object = HeapObject::FromAddress(free_end);
3329 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3330 Map* map = live_object->map();
3331 int size = live_object->SizeFromMap(map);
3332 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3333 live_object->IterateBody(map->instance_type(), size, v);
3335 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3336 int new_region_start =
3337 SkipList::RegionNumber(free_end);
3338 int new_region_end =
3339 SkipList::RegionNumber(free_end + size - kPointerSize);
3340 if (new_region_start != curr_region ||
3341 new_region_end != curr_region) {
3342 skip_list->AddObject(free_end, size);
3343 curr_region = new_region_end;
3346 free_start = free_end + size;
3348 // Clear marking bits for current cell.
3351 if (free_start != p->area_end()) {
3352 if (free_space_mode == ZAP_FREE_SPACE) {
3353 memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
3355 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3356 #ifdef ENABLE_GDB_JIT_INTERFACE
3357 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3358 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3362 p->ResetLiveBytes();
3363 if (FLAG_print_cumulative_gc_stat) {
3364 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3369 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3370 Page* p = Page::FromAddress(code->address());
3372 if (p->IsEvacuationCandidate() ||
3373 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3377 Address code_start = code->address();
3378 Address code_end = code_start + code->Size();
3380 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3381 uint32_t end_index =
3382 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3384 Bitmap* b = p->markbits();
3386 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3387 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3389 MarkBit::CellType* start_cell = start_mark_bit.cell();
3390 MarkBit::CellType* end_cell = end_mark_bit.cell();
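  // start_mask covers the start bit up to the top of its cell; end_mask
  // covers the bottom of the end cell up to the end bit.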
3393 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3394 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3396 if (start_cell == end_cell) {
3397 *start_cell |= start_mask & end_mask;
3399 *start_cell |= start_mask;
3400 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3403 *end_cell |= end_mask;
3406 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3415 static bool IsOnInvalidatedCodeObject(Address addr) {
3416 // We did not record any slots in large objects thus
3417 // we can safely go to the page from the slot address.
3418 Page* p = Page::FromAddress(addr);
3420 // First check owner's identity because old pointer and old data spaces
3421 // are swept lazily and might still have non-zero mark-bits on some pages.
3423 if (p->owner()->identity() != CODE_SPACE) return false;
3425 // In code space only bits on evacuation candidates (but we don't record
3426 // any slots on them) and under invalidated code objects are non-zero.
3428 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3430 return mark_bit.Get();
3434 void MarkCompactCollector::InvalidateCode(Code* code) {
3435 if (heap_->incremental_marking()->IsCompacting() &&
3436 !ShouldSkipEvacuationSlotRecording(code)) {
3437 ASSERT(compacting_);
3439 // If the object is white then no slots were recorded on it yet.
3440 MarkBit mark_bit = Marking::MarkBitFrom(code);
3441 if (Marking::IsWhite(mark_bit)) return;
3443 invalidated_code_.Add(code);
3448 // Return true if the given code is deoptimized or will be deoptimized.
3449 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3450 return code->is_optimized_code() && code->marked_for_deoptimization();
3454 bool MarkCompactCollector::MarkInvalidatedCode() {
3455 bool code_marked = false;
3457 int length = invalidated_code_.length();
3458 for (int i = 0; i < length; i++) {
3459 Code* code = invalidated_code_[i];
3461 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3470 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3471 int length = invalidated_code_.length();
3472 for (int i = 0; i < length; i++) {
3473 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3478 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3479 int length = invalidated_code_.length();
3480 for (int i = 0; i < length; i++) {
3481 Code* code = invalidated_code_[i];
3483 code->Iterate(visitor);
3484 SetMarkBitsUnderInvalidatedCode(code, false);
3487 invalidated_code_.Rewind(0);
3491 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3492 Heap::RelocationLock relocation_lock(heap());
3494 bool code_slots_filtering_required;
3495 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3496 code_slots_filtering_required = MarkInvalidatedCode();
3500 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3504 // Second pass: find pointers to new space and update them.
3505 PointersUpdatingVisitor updating_visitor(heap());
3507 { GCTracer::Scope gc_scope(tracer_,
3508 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3509 // Update pointers in to space.
3510 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3511 heap()->new_space()->top());
3512 for (HeapObject* object = to_it.Next();
3514 object = to_it.Next()) {
3515 Map* map = object->map();
3516 object->IterateBody(map->instance_type(),
3517 object->SizeFromMap(map),
3522 { GCTracer::Scope gc_scope(tracer_,
3523 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3525 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3528 { GCTracer::Scope gc_scope(tracer_,
3529 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3530 StoreBufferRebuildScope scope(heap_,
3531 heap_->store_buffer(),
3532 &Heap::ScavengeStoreBufferCallback);
3533 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3537 { GCTracer::Scope gc_scope(tracer_,
3538 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3539 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3540 migration_slots_buffer_,
3541 code_slots_filtering_required);
3542 if (FLAG_trace_fragmentation) {
3543 PrintF(" migration slots buffer: %d\n",
3544 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3547 if (compacting_ && was_marked_incrementally_) {
3548 // It's difficult to filter out slots recorded for large objects.
3549 LargeObjectIterator it(heap_->lo_space());
3550 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3551 // LargeObjectSpace is not swept yet thus we have to skip
3552 // dead objects explicitly.
3553 if (!IsMarked(obj)) continue;
3555 Page* p = Page::FromAddress(obj->address());
3556 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3557 obj->Iterate(&updating_visitor);
3558 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3564 int npages = evacuation_candidates_.length();
3565 { GCTracer::Scope gc_scope(
3566 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3567 for (int i = 0; i < npages; i++) {
3568 Page* p = evacuation_candidates_[i];
3569 ASSERT(p->IsEvacuationCandidate() ||
3570 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3572 if (p->IsEvacuationCandidate()) {
3573 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3575 code_slots_filtering_required);
3576 if (FLAG_trace_fragmentation) {
3577 PrintF(" page %p slots buffer: %d\n",
3578 reinterpret_cast<void*>(p),
3579 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3582 // Important: skip list should be cleared only after roots were updated
3583 // because root iteration traverses the stack and might have to find
3584 // code objects from a non-updated pc pointing into an evacuation candidate.
3585 SkipList* list = p->skip_list();
3586 if (list != NULL) list->Clear();
3588 if (FLAG_gc_verbose) {
3589 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3590 reinterpret_cast<intptr_t>(p));
3592 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3593 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3595 switch (space->identity()) {
3596 case OLD_DATA_SPACE:
3597 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3599 case OLD_POINTER_SPACE:
3600 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3603 space, p, &updating_visitor);
3606 if (FLAG_zap_code_space) {
3607 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3610 space, p, &updating_visitor);
3612 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
3615 space, p, &updating_visitor);
3626 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3628 // Update pointers from cells.
3629 HeapObjectIterator cell_iterator(heap_->cell_space());
3630 for (HeapObject* cell = cell_iterator.Next();
3632 cell = cell_iterator.Next()) {
3633 if (cell->IsCell()) {
3634 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3638 HeapObjectIterator js_global_property_cell_iterator(
3639 heap_->property_cell_space());
3640 for (HeapObject* cell = js_global_property_cell_iterator.Next();
3642 cell = js_global_property_cell_iterator.Next()) {
3643 if (cell->IsPropertyCell()) {
3644 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3648 // Update the head of the native contexts list in the heap.
3649 updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3651 heap_->string_table()->Iterate(&updating_visitor);
3652 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3653 if (heap_->weak_object_to_code_table()->IsHashTable()) {
3654 WeakHashTable* table =
3655 WeakHashTable::cast(heap_->weak_object_to_code_table());
3656 table->Iterate(&updating_visitor);
3657 table->Rehash(heap_->isolate()->factory()->undefined_value());
3660 // Update pointers from external string table.
3661 heap_->UpdateReferencesInExternalStringTable(
3662 &UpdateReferenceInExternalStringTableEntry);
3664 EvacuationWeakObjectRetainer evacuation_object_retainer;
3665 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3667 // Visit invalidated code (we ignored all slots on it) and clear mark-bits under it.
3669 ProcessInvalidatedCode(&updating_visitor);
3671 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3674 if (FLAG_verify_heap) {
3675 VerifyEvacuation(heap_);
3679 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3680 ASSERT(migration_slots_buffer_ == NULL);
3684 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3685 int npages = evacuation_candidates_.length();
3686 for (int i = 0; i < npages; i++) {
3687 Page* p = evacuation_candidates_[i];
3688 if (!p->IsEvacuationCandidate()) continue;
3690 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3691 p->InsertAfter(space->LastPage());
3696 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3697 int npages = evacuation_candidates_.length();
3698 for (int i = 0; i < npages; i++) {
3699 Page* p = evacuation_candidates_[i];
3700 if (!p->IsEvacuationCandidate()) continue;
3701 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3702 space->Free(p->area_start(), p->area_size());
3703 p->set_scan_on_scavenge(false);
3704 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3705 p->ResetLiveBytes();
3706 space->ReleasePage(p);
3708 evacuation_candidates_.Rewind(0);
3709 compacting_ = false;
3710 heap()->FreeQueuedChunks();
3714 static const int kStartTableEntriesPerLine = 5;
3715 static const int kStartTableLines = 171;
3716 static const int kStartTableInvalidLine = 127;
3717 static const int kStartTableUnusedEntry = 126;
3719 #define _ kStartTableUnusedEntry
3720 #define X kStartTableInvalidLine
3721 // Mark-bit to object start offset table.
3723 // The line is indexed by the mark bits in a byte. The first number on
3724 // the line describes the number of live object starts for the line and the
3725 // other numbers on the line describe the offsets (in words) of the object starts.
3728 // Since objects are at least 2 words long we don't have entries for two
3729 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
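//
// For example, the line for byte value 21 (binary 00010101) below reads
// "3, 0, 2, 4": three live objects start in those 8 words, at word offsets
// 0, 2 and 4. Lines marked X (kStartTableInvalidLine) belong to byte values
// that cannot occur because they would require two consecutive mark bits.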
3730 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3741 2, 1, 3, _, _, // 10
3742 X, _, _, _, _, // 11
3743 X, _, _, _, _, // 12
3744 X, _, _, _, _, // 13
3745 X, _, _, _, _, // 14
3746 X, _, _, _, _, // 15
3747 1, 4, _, _, _, // 16
3748 2, 0, 4, _, _, // 17
3749 2, 1, 4, _, _, // 18
3750 X, _, _, _, _, // 19
3751 2, 2, 4, _, _, // 20
3752 3, 0, 2, 4, _, // 21
3753 X, _, _, _, _, // 22
3754 X, _, _, _, _, // 23
3755 X, _, _, _, _, // 24
3756 X, _, _, _, _, // 25
3757 X, _, _, _, _, // 26
3758 X, _, _, _, _, // 27
3759 X, _, _, _, _, // 28
3760 X, _, _, _, _, // 29
3761 X, _, _, _, _, // 30
3762 X, _, _, _, _, // 31
3763 1, 5, _, _, _, // 32
3764 2, 0, 5, _, _, // 33
3765 2, 1, 5, _, _, // 34
3766 X, _, _, _, _, // 35
3767 2, 2, 5, _, _, // 36
3768 3, 0, 2, 5, _, // 37
3769 X, _, _, _, _, // 38
3770 X, _, _, _, _, // 39
3771 2, 3, 5, _, _, // 40
3772 3, 0, 3, 5, _, // 41
3773 3, 1, 3, 5, _, // 42
3774 X, _, _, _, _, // 43
3775 X, _, _, _, _, // 44
3776 X, _, _, _, _, // 45
3777 X, _, _, _, _, // 46
3778 X, _, _, _, _, // 47
3779 X, _, _, _, _, // 48
3780 X, _, _, _, _, // 49
3781 X, _, _, _, _, // 50
3782 X, _, _, _, _, // 51
3783 X, _, _, _, _, // 52
3784 X, _, _, _, _, // 53
3785 X, _, _, _, _, // 54
3786 X, _, _, _, _, // 55
3787 X, _, _, _, _, // 56
3788 X, _, _, _, _, // 57
3789 X, _, _, _, _, // 58
3790 X, _, _, _, _, // 59
3791 X, _, _, _, _, // 60
3792 X, _, _, _, _, // 61
3793 X, _, _, _, _, // 62
3794 X, _, _, _, _, // 63
3795 1, 6, _, _, _, // 64
3796 2, 0, 6, _, _, // 65
3797 2, 1, 6, _, _, // 66
3798 X, _, _, _, _, // 67
3799 2, 2, 6, _, _, // 68
3800 3, 0, 2, 6, _, // 69
3801 X, _, _, _, _, // 70
3802 X, _, _, _, _, // 71
3803 2, 3, 6, _, _, // 72
3804 3, 0, 3, 6, _, // 73
3805 3, 1, 3, 6, _, // 74
3806 X, _, _, _, _, // 75
3807 X, _, _, _, _, // 76
3808 X, _, _, _, _, // 77
3809 X, _, _, _, _, // 78
3810 X, _, _, _, _, // 79
3811 2, 4, 6, _, _, // 80
3812 3, 0, 4, 6, _, // 81
3813 3, 1, 4, 6, _, // 82
3814 X, _, _, _, _, // 83
3815 3, 2, 4, 6, _, // 84
3816 4, 0, 2, 4, 6, // 85
3817 X, _, _, _, _, // 86
3818 X, _, _, _, _, // 87
3819 X, _, _, _, _, // 88
3820 X, _, _, _, _, // 89
3821 X, _, _, _, _, // 90
3822 X, _, _, _, _, // 91
3823 X, _, _, _, _, // 92
3824 X, _, _, _, _, // 93
3825 X, _, _, _, _, // 94
3826 X, _, _, _, _, // 95
3827 X, _, _, _, _, // 96
3828 X, _, _, _, _, // 97
3829 X, _, _, _, _, // 98
3830 X, _, _, _, _, // 99
3831 X, _, _, _, _, // 100
3832 X, _, _, _, _, // 101
3833 X, _, _, _, _, // 102
3834 X, _, _, _, _, // 103
3835 X, _, _, _, _, // 104
3836 X, _, _, _, _, // 105
3837 X, _, _, _, _, // 106
3838 X, _, _, _, _, // 107
3839 X, _, _, _, _, // 108
3840 X, _, _, _, _, // 109
3841 X, _, _, _, _, // 110
3842 X, _, _, _, _, // 111
3843 X, _, _, _, _, // 112
3844 X, _, _, _, _, // 113
3845 X, _, _, _, _, // 114
3846 X, _, _, _, _, // 115
3847 X, _, _, _, _, // 116
3848 X, _, _, _, _, // 117
3849 X, _, _, _, _, // 118
3850 X, _, _, _, _, // 119
3851 X, _, _, _, _, // 120
3852 X, _, _, _, _, // 121
3853 X, _, _, _, _, // 122
3854 X, _, _, _, _, // 123
3855 X, _, _, _, _, // 124
3856 X, _, _, _, _, // 125
3857 X, _, _, _, _, // 126
3858 X, _, _, _, _, // 127
3859 1, 7, _, _, _, // 128
3860 2, 0, 7, _, _, // 129
3861 2, 1, 7, _, _, // 130
3862 X, _, _, _, _, // 131
3863 2, 2, 7, _, _, // 132
3864 3, 0, 2, 7, _, // 133
3865 X, _, _, _, _, // 134
3866 X, _, _, _, _, // 135
3867 2, 3, 7, _, _, // 136
3868 3, 0, 3, 7, _, // 137
3869 3, 1, 3, 7, _, // 138
3870 X, _, _, _, _, // 139
3871 X, _, _, _, _, // 140
3872 X, _, _, _, _, // 141
3873 X, _, _, _, _, // 142
3874 X, _, _, _, _, // 143
3875 2, 4, 7, _, _, // 144
3876 3, 0, 4, 7, _, // 145
3877 3, 1, 4, 7, _, // 146
3878 X, _, _, _, _, // 147
3879 3, 2, 4, 7, _, // 148
3880 4, 0, 2, 4, 7, // 149
3881 X, _, _, _, _, // 150
3882 X, _, _, _, _, // 151
3883 X, _, _, _, _, // 152
3884 X, _, _, _, _, // 153
3885 X, _, _, _, _, // 154
3886 X, _, _, _, _, // 155
3887 X, _, _, _, _, // 156
3888 X, _, _, _, _, // 157
3889 X, _, _, _, _, // 158
3890 X, _, _, _, _, // 159
3891 2, 5, 7, _, _, // 160
3892 3, 0, 5, 7, _, // 161
3893 3, 1, 5, 7, _, // 162
3894 X, _, _, _, _, // 163
3895 3, 2, 5, 7, _, // 164
3896 4, 0, 2, 5, 7, // 165
3897 X, _, _, _, _, // 166
3898 X, _, _, _, _, // 167
3899 3, 3, 5, 7, _, // 168
3900 4, 0, 3, 5, 7, // 169
3901 4, 1, 3, 5, 7 // 170
3907 // Takes a word of mark bits. Returns the number of objects that start in the
3908 // range. Puts the word offsets of those object starts in the supplied array.
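//
// Worked example (illustrative): for mark_bits == 0x00010005 the low byte 0x05
// contributes object starts at word offsets 0 and 2, and the byte at bits
// 16..23 (0x01) contributes one at word offset 16, so the function returns 3
// and fills starts with {0, 2, 16}.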
3909 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3913 // No consecutive 1 bits.
3914 ASSERT((mark_bits & 0x180) != 0x180);
3915 ASSERT((mark_bits & 0x18000) != 0x18000);
3916 ASSERT((mark_bits & 0x1800000) != 0x1800000);
3918 while (mark_bits != 0) {
3919 int byte = (mark_bits & 0xff);
3922 ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3923 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3924 int objects_in_these_8_words = table[0];
3925 ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3926 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3927 for (int i = 0; i < objects_in_these_8_words; i++) {
3928 starts[objects++] = offset + table[1 + i];
3937 static inline Address DigestFreeStart(Address approximate_free_start,
3938 uint32_t free_start_cell) {
3939 ASSERT(free_start_cell != 0);
3941 // No consecutive 1 bits.
3942 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3945 uint32_t cell = free_start_cell;
3946 int offset_of_last_live;
3947 if ((cell & 0x80000000u) != 0) {
3948 // This case would overflow below.
3949 offset_of_last_live = 31;
3951 // Remove all but one bit, the most significant. This is an optimization
3952 // that may or may not be worthwhile.
3958 cell = (cell + 1) >> 1;
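// Illustration (assuming the elided lines above smear the top mark bit into
// every lower position): a cell such as ...00100100 has become ...00111111 at
// this point, so (cell + 1) >> 1 yields ...00100000, leaving only the most
// significant mark bit for MarkWordToObjectStarts() below.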
3959 int live_objects = MarkWordToObjectStarts(cell, offsets);
3960 ASSERT(live_objects == 1);
3961 offset_of_last_live = offsets[live_objects - 1];
3963 Address last_live_start =
3964 approximate_free_start + offset_of_last_live * kPointerSize;
3965 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3966 Address free_start = last_live_start + last_live->Size();
3971 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3974 // No consecutive 1 bits.
3975 ASSERT((cell & (cell << 1)) == 0);
3978 if (cell == 0x80000000u) { // Avoid overflow below.
3979 return block_address + 31 * kPointerSize;
3981 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3982 ASSERT((first_set_bit & cell) == first_set_bit);
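// For example, with cell == ...10100: cell - 1 == ...10011, the XOR gives
// ...00111, adding 1 gives ...01000, and the final shift leaves ...00100, the
// lowest set mark bit, i.e. the start of the first live object in this cell.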
3983 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3984 ASSERT(live_objects == 1);
3986 return block_address + offsets[0] * kPointerSize;
3990 template<MarkCompactCollector::SweepingParallelism mode>
3991 static intptr_t Free(PagedSpace* space,
3992 FreeList* free_list,
3995 if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
3996 return space->Free(start, size);
3998 return size - free_list->Free(start, size);
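// In SWEEP_SEQUENTIALLY mode the freed region goes straight back to the owning
// space; in SWEEP_IN_PARALLEL mode it is put on a thread-private free list
// that SweepInParallel() below concatenates into the space's free list.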
4003 // Force instantiation of templatized SweepConservatively method for
4004 // SWEEP_SEQUENTIALLY mode.
4005 template intptr_t MarkCompactCollector::
4006 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
4007 PagedSpace*, FreeList*, Page*);
4010 // Force instantiation of templatized SweepConservatively method for
4011 // SWEEP_IN_PARALLEL mode.
4012 template intptr_t MarkCompactCollector::
4013 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
4014 PagedSpace*, FreeList*, Page*);
4017 // Sweeps a space conservatively. After this has been done the larger free
4018 // spaces have been put on the free list and the smaller ones have been
4019 // ignored and left untouched. A free space is always either ignored or put
4020 // on the free list, never split up into two parts. This is important
4021 // because it means that any FreeSpace maps left actually describe a region of
4022 // memory that can be ignored when scanning. Dead objects other than free
4023 // spaces will not contain the free space map.
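// For example, a dead run of several hundred words at the start of a page
// would become a single free-list entry, while an isolated hole of only a few
// words between two live objects would simply be ignored.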
4024 template<MarkCompactCollector::SweepingParallelism mode>
4025 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
4026 FreeList* free_list,
4028 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
4029 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
4030 free_list != NULL) ||
4031 (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
4032 free_list == NULL));
4034 // When parallel sweeping is active, the page is marked as swept only
4035 // later, by the main thread (see ParallelSweepSpaceComplete).
4036 if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
4037 p->MarkSweptConservatively();
4040 intptr_t freed_bytes = 0;
4043 // Skip over all the dead objects at the start of the page and mark them free.
4044 Address cell_base = 0;
4045 MarkBit::CellType* cell = NULL;
4046 MarkBitCellIterator it(p);
4047 for (; !it.Done(); it.Advance()) {
4048 cell_base = it.CurrentCellBase();
4049 cell = it.CurrentCell();
4050 if (*cell != 0) break;
4054 size = p->area_end() - p->area_start();
4055 freed_bytes += Free<mode>(space, free_list, p->area_start(),
4056 static_cast<int>(size));
4057 ASSERT_EQ(0, p->LiveBytes());
4061 // Grow the size of the start-of-page free space a little to get up to the
4062 // first live object.
4063 Address free_end = StartOfLiveObject(cell_base, *cell);
4064 // Free the first free space.
4065 size = free_end - p->area_start();
4066 freed_bytes += Free<mode>(space, free_list, p->area_start(),
4067 static_cast<int>(size));
4069 // The start of the current free area is represented in undigested form by
4070 // the address of the last 32-word section that contained a live object and
4071 // the marking bitmap for that cell, which describes where the live object
4072 // started. Unless we find a large free space in the bitmap we will not
4073 // digest this pair into a real address. We start the iteration here at the
4074 // first word in the marking bit map that indicates a live object.
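// For instance, if the last live object in that 32-word section starts at word
// offset 5 and is 3 words long, DigestFreeStart() turns the pair into
// cell_base + 8 * kPointerSize, the true start of the free area.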
4075 Address free_start = cell_base;
4076 MarkBit::CellType free_start_cell = *cell;
4078 for (; !it.Done(); it.Advance()) {
4079 cell_base = it.CurrentCellBase();
4080 cell = it.CurrentCell();
4082 // We have a live object. Check approximately whether it starts more than
4083 // 32 words after the last live object.
4084 if (cell_base - free_start > 32 * kPointerSize) {
4085 free_start = DigestFreeStart(free_start, free_start_cell);
4086 if (cell_base - free_start > 32 * kPointerSize) {
4087 // Now that we know the exact start of the free space it still looks
4088 // like we have a large enough free space to be worth bothering with,
4089 // so now we need to find the start of the first live object at the
4090 // end of the free space.
4091 free_end = StartOfLiveObject(cell_base, *cell);
4092 freed_bytes += Free<mode>(space, free_list, free_start,
4093 static_cast<int>(free_end - free_start));
4096 // Update our undigested record of where the current free area started.
4097 free_start = cell_base;
4098 free_start_cell = *cell;
4099 // Clear marking bits for current cell.
4104 // Handle the free space at the end of the page.
4105 if (cell_base - free_start > 32 * kPointerSize) {
4106 free_start = DigestFreeStart(free_start, free_start_cell);
4107 freed_bytes += Free<mode>(space, free_list, free_start,
4108 static_cast<int>(p->area_end() - free_start));
4111 p->ResetLiveBytes();
4116 void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
4117 PageIterator it(space);
4118 FreeList* free_list = space == heap()->old_pointer_space()
4119 ? free_list_old_pointer_space_.get()
4120 : free_list_old_data_space_.get();
4121 FreeList private_free_list(space);
4122 while (it.has_next()) {
4123 Page* p = it.next();
4125 if (p->TryParallelSweeping()) {
4126 SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
4127 free_list->Concatenate(&private_free_list);
4128 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
4130 if (p == space->end_of_unswept_pages()) break;
4135 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4136 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
4137 sweeper == PARALLEL_CONSERVATIVE ||
4138 sweeper == CONCURRENT_CONSERVATIVE);
4139 space->ClearStats();
4141 // We defensively initialize end_of_unswept_pages_ here with the first page
4142 // of the pages list.
4143 space->set_end_of_unswept_pages(space->FirstPage());
4145 PageIterator it(space);
4147 int pages_swept = 0;
4148 bool unused_page_present = false;
4149 bool parallel_sweeping_active = false;
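// In the PARALLEL_CONSERVATIVE and CONCURRENT_CONSERVATIVE modes only the
// first unswept page is swept sequentially in the loop below; every following
// page is merely marked PARALLEL_SWEEPING_PENDING and left to the sweeper
// threads (see SweepInParallel above).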
4151 while (it.has_next()) {
4152 Page* p = it.next();
4153 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
4156 // Clear the sweeping flags to indicate that the marking bits are still intact.
4156 p->ClearSweptPrecisely();
4157 p->ClearSweptConservatively();
4159 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
4160 p->IsEvacuationCandidate()) {
4161 // Will be processed in EvacuateNewSpaceAndCandidates.
4162 ASSERT(evacuation_candidates_.length() > 0);
4166 // One unused page is kept; all further unused pages are released instead of swept.
4167 if (p->LiveBytes() == 0) {
4168 if (unused_page_present) {
4169 if (FLAG_gc_verbose) {
4170 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4171 reinterpret_cast<intptr_t>(p));
4173 // Adjust unswept free bytes because releasing a page expects said
4174 // counter to be accurate for unswept pages.
4175 space->IncreaseUnsweptFreeBytes(p);
4176 space->ReleasePage(p);
4179 unused_page_present = true;
4183 case CONSERVATIVE: {
4184 if (FLAG_gc_verbose) {
4185 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4186 reinterpret_cast<intptr_t>(p));
4188 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4192 case CONCURRENT_CONSERVATIVE:
4193 case PARALLEL_CONSERVATIVE: {
4194 if (!parallel_sweeping_active) {
4195 if (FLAG_gc_verbose) {
4196 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4197 reinterpret_cast<intptr_t>(p));
4199 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4201 parallel_sweeping_active = true;
4203 if (FLAG_gc_verbose) {
4204 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4205 reinterpret_cast<intptr_t>(p));
4207 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
4208 space->IncreaseUnsweptFreeBytes(p);
4210 space->set_end_of_unswept_pages(p);
4214 if (FLAG_gc_verbose) {
4215 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4216 reinterpret_cast<intptr_t>(p));
4218 if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
4219 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
4221 } else if (space->identity() == CODE_SPACE) {
4222 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
4225 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
4237 if (FLAG_gc_verbose) {
4238 PrintF("SweepSpace: %s (%d pages swept)\n",
4239 AllocationSpaceName(space->identity()),
4243 // Give pages that are queued to be freed back to the OS.
4244 heap()->FreeQueuedChunks();
4248 void MarkCompactCollector::SweepSpaces() {
4249 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4251 state_ = SWEEP_SPACES;
4253 SweeperType how_to_sweep = CONSERVATIVE;
4254 if (AreSweeperThreadsActivated()) {
4255 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4256 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4258 if (sweep_precisely_) how_to_sweep = PRECISE;
4260 MoveEvacuationCandidatesToEndOfPagesList();
4262 // Noncompacting collections simply sweep the spaces to clear the mark
4263 // bits and free the nonlive blocks (for old and map spaces). We sweep
4264 // the map space last because freeing non-live maps overwrites them and
4265 // the other spaces rely on possibly non-live maps to get the sizes for
4266 // non-live objects.
4267 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4268 { SequentialSweepingScope scope(this);
4269 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4270 SweepSpace(heap()->old_data_space(), how_to_sweep);
4273 if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4274 how_to_sweep == CONCURRENT_CONSERVATIVE) {
4275 StartSweeperThreads();
4278 if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4279 WaitUntilSweepingCompleted();
4282 RemoveDeadInvalidatedCode();
4283 SweepSpace(heap()->code_space(), PRECISE);
4285 SweepSpace(heap()->cell_space(), PRECISE);
4286 SweepSpace(heap()->property_cell_space(), PRECISE);
4288 EvacuateNewSpaceAndCandidates();
4290 // ClearNonLiveTransitions depends on precise sweeping of map space to
4291 // detect whether an unmarked map became dead in this collection or in one
4292 // of the previous ones.
4293 SweepSpace(heap()->map_space(), PRECISE);
4295 // Deallocate unmarked objects and clear marked bits for marked objects.
4296 heap_->lo_space()->FreeUnmarkedObjects();
4298 // Deallocate evacuated candidate pages.
4299 ReleaseEvacuationCandidates();
4303 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
4304 PageIterator it(space);
4305 while (it.has_next()) {
4306 Page* p = it.next();
4307 if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
4308 p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
4309 p->MarkSweptConservatively();
4311 ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
4316 void MarkCompactCollector::ParallelSweepSpacesComplete() {
4317 ParallelSweepSpaceComplete(heap()->old_pointer_space());
4318 ParallelSweepSpaceComplete(heap()->old_data_space());
4322 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4323 if (isolate()->debug()->IsLoaded() ||
4324 isolate()->debug()->has_break_points()) {
4329 if (code_flusher_ != NULL) return;
4330 code_flusher_ = new CodeFlusher(isolate());
4332 if (code_flusher_ == NULL) return;
4333 code_flusher_->EvictAllCandidates();
4334 delete code_flusher_;
4335 code_flusher_ = NULL;
4338 if (FLAG_trace_code_flushing) {
4339 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4344 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4345 // Our profiling tools do not expect intersections between
4346 // code objects. We should either reenable it or change our tools.
4347 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4349 #ifdef ENABLE_GDB_JIT_INTERFACE
4350 if (obj->IsCode()) {
4351 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4354 if (obj->IsCode()) {
4355 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4360 Isolate* MarkCompactCollector::isolate() const {
4361 return heap_->isolate();
4365 void MarkCompactCollector::Initialize() {
4366 MarkCompactMarkingVisitor::Initialize();
4367 IncrementalMarking::Initialize();
4371 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4372 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4376 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4377 SlotsBuffer** buffer_address,
4380 AdditionMode mode) {
4381 SlotsBuffer* buffer = *buffer_address;
4382 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4383 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4384 allocator->DeallocateChain(buffer_address);
4387 buffer = allocator->AllocateBuffer(buffer);
4388 *buffer_address = buffer;
4390 ASSERT(buffer->HasSpaceForTypedSlot());
4391 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4392 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
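// A typed slot thus occupies two consecutive buffer entries: the SlotType
// encoded as a small fake pointer (which is what IsTypedSlot() above detects),
// followed by the address the slot refers to. SlotsBuffer::UpdateSlots()
// consumes them in the same pairs.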
4397 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4398 if (RelocInfo::IsCodeTarget(rmode)) {
4399 return SlotsBuffer::CODE_TARGET_SLOT;
4400 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4401 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4402 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4403 return SlotsBuffer::DEBUG_TARGET_SLOT;
4404 } else if (RelocInfo::IsJSReturn(rmode)) {
4405 return SlotsBuffer::JS_RETURN_SLOT;
4408 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4412 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4413 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4414 RelocInfo::Mode rmode = rinfo->rmode();
4415 if (target_page->IsEvacuationCandidate() &&
4416 (rinfo->host() == NULL ||
4417 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4419 if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
4420 // This doesn't need to be typed since it is just a normal heap pointer.
4421 Object** target_pointer =
4422 reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
4423 success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4424 target_page->slots_buffer_address(),
4426 SlotsBuffer::FAIL_ON_OVERFLOW);
4427 } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
4428 success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4429 target_page->slots_buffer_address(),
4430 SlotsBuffer::CODE_ENTRY_SLOT,
4431 rinfo->constant_pool_entry_address(),
4432 SlotsBuffer::FAIL_ON_OVERFLOW);
4434 success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
4435 target_page->slots_buffer_address(),
4436 SlotTypeForRMode(rmode),
4438 SlotsBuffer::FAIL_ON_OVERFLOW);
4441 EvictEvacuationCandidate(target_page);
4447 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4448 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4449 if (target_page->IsEvacuationCandidate() &&
4450 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4451 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4452 target_page->slots_buffer_address(),
4453 SlotsBuffer::CODE_ENTRY_SLOT,
4455 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4456 EvictEvacuationCandidate(target_page);
4462 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4463 ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
4464 if (is_compacting()) {
4465 Code* host = isolate()->inner_pointer_to_code_cache()->
4466 GcSafeFindCodeForInnerPointer(pc);
4467 MarkBit mark_bit = Marking::MarkBitFrom(host);
4468 if (Marking::IsBlack(mark_bit)) {
4469 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4470 RecordRelocSlot(&rinfo, target);
4476 static inline SlotsBuffer::SlotType DecodeSlotType(
4477 SlotsBuffer::ObjectSlot slot) {
4478 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4482 void SlotsBuffer::UpdateSlots(Heap* heap) {
4483 PointersUpdatingVisitor v(heap);
4485 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4486 ObjectSlot slot = slots_[slot_idx];
4487 if (!IsTypedSlot(slot)) {
4488 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4491 ASSERT(slot_idx < idx_);
4492 UpdateSlot(heap->isolate(),
4494 DecodeSlotType(slot),
4495 reinterpret_cast<Address>(slots_[slot_idx]));
4501 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4502 PointersUpdatingVisitor v(heap);
4504 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4505 ObjectSlot slot = slots_[slot_idx];
4506 if (!IsTypedSlot(slot)) {
4507 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4508 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4512 ASSERT(slot_idx < idx_);
4513 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4514 if (!IsOnInvalidatedCodeObject(pc)) {
4515 UpdateSlot(heap->isolate(),
4517 DecodeSlotType(slot),
4518 reinterpret_cast<Address>(slots_[slot_idx]));
4525 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4526 return new SlotsBuffer(next_buffer);
4530 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4535 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4536 SlotsBuffer* buffer = *buffer_address;
4537 while (buffer != NULL) {
4538 SlotsBuffer* next_buffer = buffer->next();
4539 DeallocateBuffer(buffer);
4540 buffer = next_buffer;
4542 *buffer_address = NULL;
4546 } } // namespace v8::internal