// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
#include "sweeper-thread.h"

namespace v8 {
namespace internal {
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
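
// A minimal sketch of how the two-bit patterns above are used during marking
// (WhiteToGrey/GreyToBlack/IsWhite are the real helpers from mark-compact.h;
// the object here is hypothetical). A live object typically goes
// white -> grey -> black:
//
//   MarkBit bit = Marking::MarkBitFrom(object);
//   ASSERT(Marking::IsWhite(bit));  // "00": not yet reached
//   Marking::WhiteToGrey(bit);      // "11": reached, body not yet visited
//   Marking::GreyToBlack(bit);      // "10": body visited
//
// "01" cannot be produced by these transitions, hence kImpossibleBitPattern.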
// -------------------------------------------------------------------------
// MarkCompactCollector
MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      compacting_(false),
      was_marked_incrementally_(false),
      sweeping_pending_(false),
      pending_sweeper_jobs_semaphore_(0),
      sequential_sweeping_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(heap),
      code_flusher_(NULL),
      encountered_weak_collections_(NULL),
      have_code_to_deoptimize_(false) { }
#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) {
    Code* code = rinfo->host();
    ASSERT(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};
static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}
static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. This allows us to use
  // page->area_start() as the start of the range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}
static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}
static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}
class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};
static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}
static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}
static void VerifyEvacuation(PagedSpace* space) {
  // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
  // swept pages.
  if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
      space->was_swept_conservatively()) return;
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}
static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->property_cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP
#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
 public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
          case JS_FUNCTION_TYPE:
            CheckContext(JSFunction::cast(object)->context());
            break;
          case JS_GLOBAL_PROXY_TYPE:
            CheckContext(JSGlobalProxy::cast(object)->native_context());
            break;
          case JS_GLOBAL_OBJECT_TYPE:
          case JS_BUILTINS_OBJECT_TYPE:
            CheckContext(GlobalObject::cast(object)->native_context());
            break;
          case JS_ARRAY_TYPE:
          case JS_DATE_TYPE:
          case JS_OBJECT_TYPE:
          case JS_REGEXP_TYPE:
            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
            break;
          case MAP_TYPE:
            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
            break;
          case FIXED_ARRAY_TYPE:
            if (object->IsContext()) {
              CheckContext(object);
            } else {
              FixedArray* array = FixedArray::cast(object);
              int length = array->length();
              // Set array length to zero to prevent cycles while iterating
              // over array bodies; this is easier than intrusive marking.
              array->set_length(0);
              array->IterateBody(
                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
              array->set_length(length);
            }
            break;
          case CELL_TYPE:
          case JS_PROXY_TYPE:
          case JS_VALUE_TYPE:
          case TYPE_FEEDBACK_INFO_TYPE:
            object->Iterate(this);
            break;
          case DECLARED_ACCESSOR_INFO_TYPE:
          case EXECUTABLE_ACCESSOR_INFO_TYPE:
          case BYTE_ARRAY_TYPE:
          case CALL_HANDLER_INFO_TYPE:
          case CODE_TYPE:
          case FIXED_DOUBLE_ARRAY_TYPE:
          case HEAP_NUMBER_TYPE:
          case INTERCEPTOR_INFO_TYPE:
          case ODDBALL_TYPE:
          case SCRIPT_TYPE:
          case SHARED_FUNCTION_INFO_TYPE:
            break;
          default:
            UNREACHABLE();
        }
      }
    }
  }

 private:
  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    Context* native_context = Context::cast(context)->native_context();
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
    } else {
      CHECK_EQ(current_native_context_, native_context);
    }
  }

  Context* current_native_context_;
};
static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;
    Code::cast(object)->CodeIterateBody(&visitor);
  }
}
#endif  // DEBUG
void MarkCompactCollector::SetUp() {
  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}
static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}
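
// Illustrative example for the function above (hypothetical numbers): a
// space with 10 pages whose total reserved area is 10 MB and which holds
// 6 MB of live objects reports 10 pages and 40.0% free, since
// free = reserved - SizeOfObjects() = 4 MB (printed as raw bytes).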
bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
    if (FLAG_gdbjit) return false;
#endif

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space &&
        (mode == NON_INCREMENTAL_COMPACTION ||
         FLAG_incremental_code_compaction)) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
      TraceFragmentation(heap()->property_cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakCollections();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!FLAG_collect_maps) ReattachInitialMaps();

#ifdef DEBUG
  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);
  }
#endif

#ifdef VERIFY_HEAP
  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInOptimizedCode();
  }
  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();
  }
#endif

  Finish();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}
void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->property_cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}
void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}
class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, PagedSpace* space)
      : heap_(heap), space_(space) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  virtual void Run() V8_OVERRIDE {
    heap_->mark_compact_collector()->SweepInParallel(space_);
    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
  }

  Heap* heap_;
  PagedSpace* space_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
void MarkCompactCollector::StartSweeperThreads() {
  // TODO(hpayer): This check is just used for debugging purposes and
  // should be removed or turned into an assert after investigating the
  // crash in concurrent sweeping.
  CHECK(free_list_old_pointer_space_.get()->IsEmpty());
  CHECK(free_list_old_data_space_.get()->IsEmpty());
  sweeping_pending_ = true;
  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    isolate()->sweeper_threads()[i]->StartSweeping();
  }
  if (FLAG_job_based_sweeping) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new SweeperTask(heap(), heap()->old_data_space()),
        v8::Platform::kShortRunningTask);
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new SweeperTask(heap(), heap()->old_pointer_space()),
        v8::Platform::kShortRunningTask);
  }
}
void MarkCompactCollector::WaitUntilSweepingCompleted() {
  ASSERT(sweeping_pending_ == true);
  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
  }
  if (FLAG_job_based_sweeping) {
    // Wait twice for both jobs.
    pending_sweeper_jobs_semaphore_.Wait();
    pending_sweeper_jobs_semaphore_.Wait();
  }
  ParallelSweepSpacesComplete();
  sweeping_pending_ = false;
  RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
  RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}
intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
  FreeList* free_list;

  if (space == heap()->old_pointer_space()) {
    free_list = free_list_old_pointer_space_.get();
  } else if (space == heap()->old_data_space()) {
    free_list = free_list_old_data_space_.get();
  } else {
    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
    // to only refill them for old data and pointer spaces.
    return 0;
  }

  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
  return freed_bytes;
}
bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
}


bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
  return sweeping_pending_;
}
void Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap_->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif
}
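
// Sketch of the net effect of TransferMark above: a black ("10") source
// stays black at the new address; a grey ("11") source is re-pushed so its
// body is eventually visited at the new address; a white source needs no
// transfer at all. In every case the old mark bits end up white, so the old
// address looks unmarked afterwards.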
const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case PROPERTY_CELL_SPACE:
      return "PROPERTY_CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default: UNREACHABLE();
  }

  return NULL;
}
// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise it returns a positive integer that
// gives an estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If the page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}
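
// Worked example for the function above (hypothetical sizes): for a
// non-code page with area_size = 1000 KB, small_size_ = 100 KB and
// medium_size_ = 0, ratio = (100 * 5 + 0) * 100 / 1000 = 50. That exceeds
// the threshold of 15, so the page scores 50 - 15 = 35 on the
// fragmentation scale.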
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free. We expect that half-empty pages
    // are easier to compact so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;
  }

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages. We expect that almost empty
    // pages are even easier to compact so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;
  }

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
           "evacuation candidate limit: %d\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold),
           max_evacuation_candidates);
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}
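
// Example of the candidate limit computed above (hypothetical page count):
// a space with 200 pages starts from sqrt(200 / 2) + 1 = 11 candidate
// slots; the REDUCE_MEMORY_FOOTPRINT bump raises that to 13, and doubling
// for very high over-usage yields 26, always capped at
// kMaxMaxEvacuationCandidates.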
void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}
void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (IsConcurrentSweepingInProgress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    WaitUntilSweepingCompleted();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}
void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}
// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's mark bits are set (see the bit patterns at the
//          top of this file).

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
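
// A compact sketch of the overflow protocol described above, assuming the
// companion helpers EmptyMarkingDeque()/RefillMarkingDeque() used elsewhere
// in this file (ProcessMarkingDeque() is the real driver; see its call
// sites below):
//
//   EmptyMarkingDeque();                  // drain; may set the overflow flag
//   while (marking_deque_.overflowed()) {
//     RefillMarkingDeque();               // re-discover overflowed objects
//     EmptyMarkingDeque();                // and drain again
//   }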
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile =
      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->
        RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile =
      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}
void CodeFlusher::ProcessOptimizedCodeMaps() {
  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;

  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    int new_length = SharedFunctionInfo::kEntriesStart;
    int old_length = code_map->length();
    for (int i = SharedFunctionInfo::kEntriesStart;
         i < old_length;
         i += SharedFunctionInfo::kEntryLength) {
      Code* code =
          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
      if (!Marking::MarkBitFrom(code).Get()) continue;

      // Move every slot in the entry.
      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
        int dst_index = new_length++;
        Object** slot = code_map->RawFieldOfElementAt(dst_index);
        Object* object = code_map->get(i + j);
        code_map->set(dst_index, object);
        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
          ASSERT(object->IsSmi());
        } else {
          ASSERT(Marking::IsBlack(
              Marking::MarkBitFrom(HeapObject::cast(*slot))));
          isolate_->heap()->mark_compact_collector()->
              RecordSlot(slot, slot, *slot);
        }
      }
    }

    // Trim the optimized code map if entries have been removed.
    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);
    }

    holder = next_holder;
  }

  optimized_code_map_holder_head_ = NULL;
}
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}
void CodeFlusher::EvictCandidate(JSFunction* function) {
  ASSERT(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}
void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons code-map: ");
    code_map_holder->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  if (holder == code_map_holder) {
    next_holder = GetNextCodeMap(code_map_holder);
    optimized_code_map_holder_head_ = next_holder;
    ClearNextCodeMap(code_map_holder);
  } else {
    while (holder != NULL) {
      next_holder = GetNextCodeMap(holder);

      if (next_holder == code_map_holder) {
        next_holder = GetNextCodeMap(code_map_holder);
        SetNextCodeMap(holder, next_holder);
        ClearNextCodeMap(code_map_holder);
        break;
      }

      holder = next_holder;
    }
  }
}
void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(jsfunction_candidates_head_ == NULL);
}


void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(shared_function_info_candidates_head_ == NULL);
}


void CodeFlusher::EvictOptimizedCodeMaps() {
  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    EvictOptimizedCodeMap(holder);
    holder = next_holder;
  }
  ASSERT(optimized_code_map_holder_head_ == NULL);
}
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}
MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}
static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-internalized
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsInternalizedString() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
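
// Concrete illustration of the shortcut above: for a slot *p pointing at a
// non-internalized cons string whose parts are conceptually ("foo" + ""),
// i.e. first() == "foo" and second() == the empty string, *p is overwritten
// with "foo", so the dead cons wrapper is never marked through this slot.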
class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
                                   Map* map, HeapObject* obj);

  static void ObjectStatsCountFixedArray(
      FixedArrayBase* fixed_array,
      FixedArraySubInstanceType fast_type,
      FixedArraySubInstanceType dictionary_type);

  template<MarkCompactMarkingVisitor::VisitorId id>
  class ObjectStatsTracker {
   public:
    static inline void Visit(Map* map, HeapObject* obj);
  };

  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);
    }
  }
  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (!mark_bit.Get()) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark the object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
                                         Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);
  }

  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(collector->heap()->Contains(obj));
    ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }
  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
    shared->BeforeVisitingPointers();
  }

  static void VisitWeakCollection(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(object);

    // Enqueue the weak collection in the linked list of encountered weak
    // collections.
    if (weak_collection->next() == Smi::FromInt(0)) {
      weak_collection->set_next(collector->encountered_weak_collections());
      collector->set_encountered_weak_collections(weak_collection);
    }

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakCollection::BodyDescriptor::kStartOffset,
        JSWeakCollection::kTableOffset);
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakCollection::kTableOffset + kPointerSize,
        object_size);

    // Mark the backing hash table without pushing it on the marking stack.
    Object* table_object = weak_collection->table();
    if (!table_object->IsHashTable()) return;
    WeakHashTable* table = WeakHashTable::cast(table_object);
    Object** table_slot =
        HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
    MarkBit table_mark = Marking::MarkBitFrom(table);
    collector->RecordSlot(table_slot, table_slot, table);
    if (!table_mark.Get()) collector->SetMark(table, table_mark);
    // Recording the map slot can be skipped, because maps are not compacted.
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
    ASSERT(MarkCompactCollector::IsMarked(table->map()));
  }
 private:
  template<int id>
  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->
          RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_ascii),
                    Smi::FromInt(heap->sweep_generation() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_ascii),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
      }
    }
  }
  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ASCII and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExp(map, object);
  }

  static VisitorDispatchTable<Callback> non_count_table_;
};
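
// Illustrative timeline for the regexp flushing heuristic above
// (hypothetical sweep generations): code used at generation 42 stores
// Smi(42) in the code slot; if the regexp stays unused, then at generation
// 47 the check 47 - kRegExpCodeThreshold == 42 holds and both the code
// slot and the saved-code slot are reset to kUninitializedValue.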
void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
    FixedArrayBase* fixed_array,
    FixedArraySubInstanceType fast_type,
    FixedArraySubInstanceType dictionary_type) {
  Heap* heap = fixed_array->map()->GetHeap();
  if (fixed_array->map() != heap->fixed_cow_array_map() &&
      fixed_array->map() != heap->fixed_double_array_map() &&
      fixed_array != heap->empty_fixed_array()) {
    if (fixed_array->IsDictionary()) {
      heap->RecordFixedArraySubTypeStats(dictionary_type,
                                         fixed_array->Size());
    } else {
      heap->RecordFixedArraySubTypeStats(fast_type,
                                         fixed_array->Size());
    }
  }
}


void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
  Heap* heap = map->GetHeap();
  int object_size = obj->Size();
  heap->RecordObjectStats(map->instance_type(), object_size);
  non_count_table_.GetVisitorById(id)(map, obj);
  if (obj->IsJSObject()) {
    JSObject* object = JSObject::cast(obj);
    ObjectStatsCountFixedArray(object->elements(),
                               DICTIONARY_ELEMENTS_SUB_TYPE,
                               FAST_ELEMENTS_SUB_TYPE);
    ObjectStatsCountFixedArray(object->properties(),
                               DICTIONARY_PROPERTIES_SUB_TYPE,
                               FAST_PROPERTIES_SUB_TYPE);
  }
}
template<MarkCompactMarkingVisitor::VisitorId id>
void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
    Map* map, HeapObject* obj) {
  ObjectStatsVisitBase(id, map, obj);
}


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitMap> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    Map* map_obj = Map::cast(obj);
    ASSERT(map->instance_type() == MAP_TYPE);
    DescriptorArray* array = map_obj->instance_descriptors();
    if (map_obj->owns_descriptors() &&
        array != heap->empty_descriptor_array()) {
      int fixed_array_size = array->Size();
      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
                                         fixed_array_size);
    }
    if (map_obj->HasTransitionArray()) {
      int fixed_array_size = map_obj->transitions()->Size();
      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
                                         fixed_array_size);
    }
    if (map_obj->has_code_cache()) {
      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
                                         cache->default_cache()->Size());
      if (!cache->normal_type_cache()->IsUndefined()) {
        heap->RecordFixedArraySubTypeStats(
            MAP_CODE_CACHE_SUB_TYPE,
            FixedArray::cast(cache->normal_type_cache())->Size());
      }
    }
    ObjectStatsVisitBase(kVisitMap, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitCode> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    int object_size = obj->Size();
    ASSERT(map->instance_type() == CODE_TYPE);
    Code* code_obj = Code::cast(obj);
    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
                                 object_size);
    ObjectStatsVisitBase(kVisitCode, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
    if (sfi->scope_info() != heap->empty_fixed_array()) {
      heap->RecordFixedArraySubTypeStats(
          SCOPE_INFO_SUB_TYPE,
          FixedArray::cast(sfi->scope_info())->Size());
    }
    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitFixedArray> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    FixedArray* fixed_array = FixedArray::cast(obj);
    if (fixed_array == heap->string_table()) {
      heap->RecordFixedArraySubTypeStats(
          STRING_TABLE_SUB_TYPE,
          fixed_array->Size());
    }
    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
  }
};
void MarkCompactMarkingVisitor::Initialize() {
  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

  table_.Register(kVisitJSRegExp,
                  &VisitRegExpAndFlushCode);

  if (FLAG_track_gc_object_stats) {
    // Copy the visitor table to make call-through possible.
    non_count_table_.CopyFrom(&table_);
#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
#undef VISITOR_ID_COUNT_FUNCTION
  }
}


VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
    MarkCompactMarkingVisitor::non_count_table_;
class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);
  }

 private:
  MarkCompactCollector* collector_;
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);
    }
  }

 private:
  MarkCompactCollector* collector_;
};
void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization,
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function and StackFrame::LookupCode will return the
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
                                                          frame->LookupCode());
    }
  }
}
void MarkCompactCollector::PrepareForCodeFlushing() {
  // Enable code flushing for non-incremental cycles.
  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
    EnableCodeFlushing(!was_marked_incrementally_);
  }

  // If code flushing is disabled, there is no need to prepare for it.
  if (!is_code_flushing_enabled()) return;

  // Ensure that the empty descriptor array is marked. The method
  // MarkDescriptorArray relies on it being marked before any other
  // descriptor array.
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
                               heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingDeque();
}
// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  // Skip the weak next code link in a code object, which is visited in
  // ProcessTopOptimizedFrame.
  void VisitNextCodeLink(Object** p) { }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();

    collector_->SetMark(object, mark_bit);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    MarkCompactMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingDeque();
  }

  MarkCompactCollector* collector_;
};
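

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: MarkObjectByPointer above follows the
// classic tri-colour discipline. An object is marked (grey) before it is
// pushed, and is effectively black once its body has been scanned.  The
// MiniNode/MiniMark names below are invented for this sketch, and the plain
// fixed-size work list stands in for the real marking deque.
namespace marking_sketch {

struct MiniNode {
  bool marked;            // false == white; set before the node is pushed
  MiniNode* children[4];  // outgoing edges; NULL entries are ignored
};

static void MiniMark(MiniNode* root) {
  // Assumes at most 256 reachable nodes; V8 instead tolerates a full deque
  // by recording an overflow and rescanning (see RefillMarkingDeque below).
  MiniNode* work_list[256];
  int top = 0;
  if (root == NULL || root->marked) return;
  root->marked = true;                     // grey: pushed, not yet scanned
  work_list[top++] = root;
  while (top > 0) {                        // cf. EmptyMarkingDeque()
    MiniNode* current = work_list[--top];  // becomes black once scanned
    for (int i = 0; i < 4; i++) {
      MiniNode* child = current->children[i];
      if (child != NULL && !child->marked) {
        child->marked = true;
        work_list[top++] = child;
      }
    }
  }
}

}  // namespace marking_sketch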


// Helper class for pruning the string table.
template<bool finalize_external_strings>
class StringTableCleaner : public ObjectVisitor {
 public:
  explicit StringTableCleaner(Heap* heap)
      : heap_(heap), pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject() &&
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
        if (finalize_external_strings) {
          ASSERT(o->IsExternalString());
          heap_->FinalizeExternalString(String::cast(*p));
        } else {
          pointers_removed_++;
        }
        // Set the entry to the_hole_value (as deleted).
        *p = heap_->the_hole_value();
      }
    }
  }

  int PointersRemoved() {
    ASSERT(!finalize_external_strings);
    return pointers_removed_;
  }

 private:
  Heap* heap_;
  int pointers_removed_;
};


typedef StringTableCleaner<false> InternalizedStringTableCleaner;
typedef StringTableCleaner<true> ExternalStringTableCleaner;


// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
// are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
      return object;
    } else if (object->IsAllocationSite() &&
               !(AllocationSite::cast(object)->IsZombie())) {
      // "dead" AllocationSites need to live long enough for a traversal of new
      // space.  These sites get a one-time reprieve by being marked as zombies.
      AllocationSite* site = AllocationSite::cast(object);
      site->MarkZombie();
      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
      return object;
    } else {
      return NULL;
    }
  }
};


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                            MarkingDeque* marking_deque,
                                            T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_deque->IsFull());

  Map* filler_map = heap->one_pointer_filler_map();
  for (HeapObject* object = it->Next();
       object != NULL;
       object = it->Next()) {
    MarkBit markbit = Marking::MarkBitFrom(object);
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
      Marking::GreyToBlack(markbit);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
    }
  }
}


static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);


static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
                                      MemoryChunk* p) {
  ASSERT(!marking_deque->IsFull());
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    const MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    MarkBit::CellType grey_objects;
    if (it.HasNext()) {
      const MarkBit::CellType next_cell = *(cell+1);
      grey_objects = current_cell &
          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
    } else {
      grey_objects = current_cell & (current_cell >> 1);
    }

    int offset = 0;
    while (grey_objects != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
      grey_objects >>= trailing_zeros;
      offset += trailing_zeros;
      MarkBit markbit(cell, 1 << offset, false);
      ASSERT(Marking::IsGrey(markbit));
      Marking::GreyToBlack(markbit);
      Address addr = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(addr);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
      // Step past the grey object just handled; objects are at least two
      // words, so the next possible start bit is two positions up.
      offset += 2;
      grey_objects >>= 2;
    }
  }
}
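

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: a worked example of the grey-object
// extraction above, assuming 32-bit mark cells.  Grey is the bit pattern
// "11" (the object's own mark bit plus the following bit), so ANDing a cell
// with itself shifted right by one leaves a 1 exactly at each grey object's
// start bit; only the topmost bit needs a carry-in from the next cell.
//
//   current_cell      = 0x62  (0110'0010: black object at bit 1 "10",
//                              grey object at bit 5 "11")
//   current_cell >> 1 = 0x31  (0011'0001)
//   AND               = 0x20  (0010'0000: only the grey start bit survives)
static inline uint32_t GreyBitsSketch(uint32_t current_cell,
                                      uint32_t next_cell) {
  // Bit i of the result is set iff bits i and i+1 of the concatenated
  // (next_cell:current_cell) bit string are both set.
  return current_cell & ((current_cell >> 1) | (next_cell << 31));
}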


int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
    NewSpace* new_space,
    NewSpacePage* p) {
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  MarkBit::CellType* cells = p->markbits()->cells();
  int survivors_size = 0;

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    MarkBit::CellType current_cell = *cell;
    if (current_cell == 0) continue;

    int offset = 0;
    while (current_cell != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
      current_cell >>= trailing_zeros;
      offset += trailing_zeros;
      Address address = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(address);

      int size = object->Size();
      survivors_size += size;

      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);

      offset++;
      current_cell >>= 1;
      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate object to another semispace.
      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        if (!new_space->AddFreshPage()) {
          // Shouldn't happen. We are sweeping linearly, and to-space
          // has the same number of pages as from-space, so there is
          // always room.
          UNREACHABLE();
        }
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      }
      Object* target = allocation->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target),
                    object,
                    size,
                    NEW_SPACE);
    }
  }

  return survivors_size;
}
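

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the CountTrailingZeros loop shape used
// above, restated over a plain 32-bit word.  Each set bit is a live-object
// start; runs of zero bits are skipped in one step instead of bit by bit.
// The portable trailing-zero count below stands in for
// CompilerIntrinsics::CountTrailingZeros.
static inline int CountObjectStartsSketch(uint32_t cell) {
  int count = 0;
  int offset = 0;
  while (cell != 0) {
    int trailing_zeros = 0;
    while (((cell >> trailing_zeros) & 1) == 0) trailing_zeros++;
    cell >>= trailing_zeros;
    offset += trailing_zeros;  // 'offset' is now a live object's start word
    count++;
    offset++;                  // step past the start bit just handled,
    cell >>= 1;                // exactly as the loop above does
  }
  return count;
}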


static void DiscoverGreyObjectsInSpace(Heap* heap,
                                       MarkingDeque* marking_deque,
                                       PagedSpace* space) {
  if (!space->was_swept_conservatively()) {
    HeapObjectIterator it(space);
    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
  } else {
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      DiscoverGreyObjectsOnPage(marking_deque, p);
      if (marking_deque->IsFull()) return;
    }
  }
}


static void DiscoverGreyObjectsInNewSpace(Heap* heap,
                                          MarkingDeque* marking_deque) {
  NewSpace* space = heap->new_space();
  NewSpacePageIterator it(space->bottom(), space->top());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    DiscoverGreyObjectsOnPage(marking_deque, page);
    if (marking_deque->IsFull()) return;
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
                                                        Object** p) {
  Object* o = *p;
  ASSERT(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
  SetMark(string_table, string_table_mark);
  // Explicitly mark the prefix.
  string_table->IteratePrefix(visitor);
  ProcessMarkingDeque();
}


void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
  MarkBit mark_bit = Marking::MarkBitFrom(site);
  SetMark(site, mark_bit);
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the string table specially.
  MarkStringTable(visitor);

  MarkWeakObjectToCodeTable();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}


void MarkCompactCollector::MarkImplicitRefGroups() {
  List<ImplicitRefGroup*>* ref_groups =
      isolate()->global_handles()->implicit_ref_groups();

  int last = 0;
  for (int i = 0; i < ref_groups->length(); i++) {
    ImplicitRefGroup* entry = ref_groups->at(i);
    ASSERT(entry != NULL);

    if (!IsMarked(*entry->parent)) {
      (*ref_groups)[last++] = entry;
      continue;
    }

    Object*** children = entry->children;
    // A parent object is marked, so mark all child heap objects.
    for (size_t j = 0; j < entry->length; ++j) {
      if ((*children[j])->IsHeapObject()) {
        HeapObject* child = HeapObject::cast(*children[j]);
        MarkBit mark = Marking::MarkBitFrom(child);
        MarkObject(child, mark);
      }
    }

    // Once the entire group has been marked, dispose it because it's
    // not needed anymore.
    delete entry;
  }
  ref_groups->Rewind(last);
}


void MarkCompactCollector::MarkWeakObjectToCodeTable() {
  HeapObject* weak_object_to_code_table =
      HeapObject::cast(heap()->weak_object_to_code_table());
  if (!IsMarked(weak_object_to_code_table)) {
    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
    SetMark(weak_object_to_code_table, mark);
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
  while (!marking_deque_.IsEmpty()) {
    HeapObject* object = marking_deque_.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(heap()->Contains(object));
    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

    Map* map = object->map();
    MarkBit map_mark = Marking::MarkBitFrom(map);
    MarkObject(map, map_mark);

    MarkCompactMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
  ASSERT(marking_deque_.overflowed());

  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_pointer_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_data_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->code_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->map_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->cell_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->property_cell_space());
  if (marking_deque_.IsFull()) return;

  LargeObjectIterator lo_it(heap()->lo_space());
  DiscoverGreyObjectsWithIterator(heap(),
                                  &marking_deque_,
                                  &lo_it);
  if (marking_deque_.IsFull()) return;

  marking_deque_.ClearOverflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.  After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingDeque() {
  EmptyMarkingDeque();
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}
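

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the Empty/Refill pair above implements
// marking with a bounded work list.  When the deque fills up, an object is
// simply left grey in the heap and an overflow flag is set; refilling then
// rediscovers grey objects by rescanning.  Everything below (Cell, State,
// the capacity of 4, one edge per cell) is invented for this sketch.
namespace overflow_sketch {

struct Cell {
  bool grey;    // marked, body not yet scanned  (cf. bit pattern "11")
  bool black;   // marked and scanned            (cf. bit pattern "10")
  int child;    // index of one outgoing edge, or -1
};

struct State {
  Cell* cells;
  int num_cells;
  int deque[4];
  int top;
  bool overflowed;
};

static void Push(State* s, int i) {
  s->cells[i].grey = true;
  if (s->top == 4) {
    s->overflowed = true;        // the object stays grey in the "heap"
  } else {
    s->deque[s->top++] = i;
  }
}

static void Empty(State* s) {    // cf. EmptyMarkingDeque()
  while (s->top > 0) {
    Cell* c = &s->cells[s->deque[--s->top]];
    c->grey = false;
    c->black = true;             // grey -> black once the body is scanned
    if (c->child >= 0 &&
        !s->cells[c->child].grey && !s->cells[c->child].black) {
      Push(s, c->child);
    }
  }
}

static void Refill(State* s) {   // cf. RefillMarkingDeque()
  s->overflowed = false;
  for (int i = 0; i < s->num_cells; i++) {
    if (s->cells[i].grey) Push(s, i);
    if (s->overflowed) return;   // the deque filled up again; flag stays set
  }
}

static void Process(State* s) {  // cf. ProcessMarkingDeque()
  Empty(s);
  while (s->overflowed) {
    Refill(s);
    Empty(s);
  }
}

}  // namespace overflow_sketch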


// Mark all objects reachable (transitively) from objects on the marking
// stack including references only considered in the atomic marking pause.
void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_deque_.IsEmpty());
  while (work_to_do) {
    isolate()->global_handles()->IterateObjectGroups(
        visitor, &IsUnmarkedHeapObjectWithHeap);
    MarkImplicitRefGroups();
    ProcessWeakCollections();
    work_to_do = !marking_deque_.IsEmpty();
    ProcessMarkingDeque();
  }
}
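

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the loop above computes a fixed point.
// Marking a weak-collection value can make new keys reachable, which can make
// further values live, so the key->value propagation repeats until a full
// round adds no work.  The flat (key, value) table below is invented for the
// sketch; the real code propagates through the marking deque rather than
// marking values directly.
namespace ephemeron_sketch {

struct Entry { int key; int value; };  // indices into the 'marked' array

static void MarkEphemerons(bool* marked, const Entry* entries, int count) {
  bool work_to_do = true;
  while (work_to_do) {           // cf. ProcessEphemeralMarking()
    work_to_do = false;
    for (int i = 0; i < count; i++) {
      // Weak-map semantics: a value is kept alive only by a live key.
      if (marked[entries[i].key] && !marked[entries[i].value]) {
        marked[entries[i].value] = true;
        work_to_do = true;       // may have made more keys reachable
      }
    }
  }
}

}  // namespace ephemeron_sketch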


void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
      return;
    }
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code* code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        code->CodeIterateBody(visitor);
      }
      ProcessMarkingDeque();
      return;
    }
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system.  JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(isolate());

  bool incremental_marking_overflowed = false;
  IncrementalMarking* incremental_marking = heap_->incremental_marking();
  if (was_marked_incrementally_) {
    // Finalize the incremental marking and check whether we had an overflow.
    // Both markers use grey color to mark overflowed objects so
    // non-incremental marker can deal with them as if overflow
    // occurred during normal marking.
    // But incremental marker uses a separate marking deque
    // so we have to explicitly copy its overflow state.
    incremental_marking->Finalize();
    incremental_marking_overflowed =
        incremental_marking->marking_deque()->overflowed();
    incremental_marking->marking_deque()->ClearOverflowed();
  } else {
    // Abort any pending incremental activities e.g. incremental sweeping.
    incremental_marking->Abort();
  }

#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, a page in from space is used as a
  // marking stack.
  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
  if (FLAG_force_marking_deque_overflows) {
    marking_deque_end = marking_deque_start + 64 * kPointerSize;
  }
  marking_deque_.Initialize(marking_deque_start,
                            marking_deque_end);
  ASSERT(!marking_deque_.overflowed());

  if (incremental_marking_overflowed) {
    // There are overflowed objects left in the heap after incremental marking.
    marking_deque_.SetOverflowed();
  }

  PrepareForCodeFlushing();

  if (was_marked_incrementally_) {
    // There is no write barrier on cells so we have to scan them now at the
    // end of the incremental marking.
    {
      HeapObjectIterator cell_iterator(heap()->cell_space());
      HeapObject* cell;
      while ((cell = cell_iterator.Next()) != NULL) {
        ASSERT(cell->IsCell());
        if (IsMarked(cell)) {
          int offset = Cell::kValueOffset;
          MarkCompactMarkingVisitor::VisitPointer(
              heap(),
              reinterpret_cast<Object**>(cell->address() + offset));
        }
      }
    }
    {
      HeapObjectIterator js_global_property_cell_iterator(
          heap()->property_cell_space());
      HeapObject* cell;
      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
        ASSERT(cell->IsPropertyCell());
        if (IsMarked(cell)) {
          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
        }
      }
    }
  }

  RootMarkingVisitor root_visitor(heap());
  MarkRoots(&root_visitor);

  ProcessTopOptimizedFrame(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked.  Mark objects reachable due to host
  // application specific logic or through Harmony weak maps.
  ProcessEphemeralMarking(&root_visitor);

  // The objects reachable from the roots, weak maps or object groups
  // are marked, yet unreachable objects are unmarked.  Mark objects
  // reachable only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  heap()->isolate()->global_handles()->IdentifyWeakHandles(
      &IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }

  // Repeat host application specific and Harmony weak maps marking to
  // mark unmarked objects reachable from the weak roots.
  ProcessEphemeralMarking(&root_visitor);

  AfterMarking();
}


void MarkCompactCollector::AfterMarking() {
  // Object literal map caches reference strings (cache keys) and maps
  // (cache values).  At this point still useful maps have already been
  // marked.  Mark the keys for the alive values before we process the
  // string table.
  ProcessMapCaches();

  // Prune the string table removing all strings only pointed to by the
  // string table.  Cannot use string_table() here because the string
  // table is marked.
  StringTable* string_table = heap()->string_table();
  InternalizedStringTableCleaner internalized_visitor(heap());
  string_table->IterateElements(&internalized_visitor);
  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());

  ExternalStringTableCleaner external_visitor(heap());
  heap()->external_string_table_.Iterate(&external_visitor);
  heap()->external_string_table_.CleanUp();

  // Process the weak references.
  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
  heap()->ProcessWeakReferences(&mark_compact_object_retainer);

  // Remove object groups after marking phase.
  heap()->isolate()->global_handles()->RemoveObjectGroups();
  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();

  // Flush code from collected candidates.
  if (is_code_flushing_enabled()) {
    code_flusher_->ProcessCandidates();
    // If incremental marker does not support code flushing, we need to
    // disable it before incremental marking steps for next cycle.
    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
      EnableCodeFlushing(false);
    }
  }

  if (FLAG_track_gc_object_stats) {
    heap()->CheckpointObjectStats();
  }
}


void MarkCompactCollector::ProcessMapCaches() {
  Object* raw_context = heap()->native_contexts_list_;
  while (raw_context != heap()->undefined_value()) {
    Context* context = reinterpret_cast<Context*>(raw_context);
    if (IsMarked(context)) {
      HeapObject* raw_map_cache =
          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
      // A map cache may be reachable from the stack.  In this case
      // it's already transitively marked and it's too late to clean
      // it up.
      if (!IsMarked(raw_map_cache) &&
          raw_map_cache != heap()->undefined_value()) {
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
        int existing_elements = map_cache->NumberOfElements();
        int used_elements = 0;
        for (int i = MapCache::kElementsStartIndex;
             i < map_cache->length();
             i += MapCache::kEntrySize) {
          Object* raw_key = map_cache->get(i);
          if (raw_key == heap()->undefined_value() ||
              raw_key == heap()->the_hole_value()) continue;
          STATIC_ASSERT(MapCache::kEntrySize == 2);
          Object* raw_map = map_cache->get(i + 1);
          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
            ++used_elements;
          } else {
            // Delete useless entries with unmarked maps.
            ASSERT(raw_map->IsMap());
            map_cache->set_the_hole(i);
            map_cache->set_the_hole(i + 1);
          }
        }
        if (used_elements == 0) {
          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
        } else {
          // Note: we don't actually shrink the cache here to avoid
          // extra complexity during GC.  We rely on subsequent cache
          // usages (EnsureCapacity) to do this.
          map_cache->ElementsRemoved(existing_elements - used_elements);
          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
          MarkObject(map_cache, map_cache_markbit);
        }
      }
    }
    // Move to next element in the list.
    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
  }
  ProcessMarkingDeque();
}


void MarkCompactCollector::ReattachInitialMaps() {
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    Map* map = Map::cast(obj);

    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;

    if (map->attached_to_shared_function_info()) {
      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
    }
  }
}


void MarkCompactCollector::ClearNonLiveReferences() {
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions.  This action
  // is carried out only on maps of JSObjects and related subtypes.
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    Map* map = Map::cast(obj);

    if (!map->CanTransition()) continue;

    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (map_mark.Get() && map->attached_to_shared_function_info()) {
      // This map is used for inobject slack tracking and has been detached
      // from SharedFunctionInfo during the mark phase.
      // Since it survived the GC, reattach it now.
      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
    }

    ClearNonLivePrototypeTransitions(map);
    ClearNonLiveMapTransitions(map, map_mark);

    if (map_mark.Get()) {
      ClearNonLiveDependentCode(map->dependent_code());
    } else {
      ClearAndDeoptimizeDependentCode(map->dependent_code());
      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
    }
  }

  // Iterate over property cell space, removing dependent code that is not
  // otherwise kept alive by strong references.
  HeapObjectIterator cell_iterator(heap_->property_cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (IsMarked(cell)) {
      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
    }
  }

  // Iterate over allocation sites, removing dependent code that is not
  // otherwise kept alive by strong references.
  Object* undefined = heap()->undefined_value();
  for (Object* site = heap()->allocation_sites_list();
       site != undefined;
       site = AllocationSite::cast(site)->weak_next()) {
    if (IsMarked(site)) {
      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
    }
  }

  if (heap_->weak_object_to_code_table()->IsHashTable()) {
    WeakHashTable* table =
        WeakHashTable::cast(heap_->weak_object_to_code_table());
    uint32_t capacity = table->Capacity();
    for (uint32_t i = 0; i < capacity; i++) {
      uint32_t key_index = table->EntryToIndex(i);
      Object* key = table->get(key_index);
      if (!table->IsKey(key)) continue;
      uint32_t value_index = table->EntryToValueIndex(i);
      Object* value = table->get(value_index);
      if (key->IsCell() && !IsMarked(key)) {
        Cell* cell = Cell::cast(key);
        Object* object = cell->value();
        if (IsMarked(object)) {
          MarkBit mark = Marking::MarkBitFrom(cell);
          SetMark(cell, mark);
          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
          RecordSlot(value_slot, value_slot, *value_slot);
        }
      }
      if (IsMarked(key)) {
        if (!IsMarked(value)) {
          HeapObject* obj = HeapObject::cast(value);
          MarkBit mark = Marking::MarkBitFrom(obj);
          SetMark(obj, mark);
        }
        ClearNonLiveDependentCode(DependentCode::cast(value));
      } else {
        ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
        table->set(key_index, heap_->the_hole_value());
        table->set(value_index, heap_->the_hole_value());
      }
    }
  }
}


void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
  int number_of_transitions = map->NumberOfProtoTransitions();
  FixedArray* prototype_transitions = map->GetPrototypeTransitions();

  int new_number_of_transitions = 0;
  const int header = Map::kProtoTransitionHeaderSize;
  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
  const int map_offset = header + Map::kProtoTransitionMapOffset;
  const int step = Map::kProtoTransitionElementsPerEntry;
  for (int i = 0; i < number_of_transitions; i++) {
    Object* prototype = prototype_transitions->get(proto_offset + i * step);
    Object* cached_map = prototype_transitions->get(map_offset + i * step);
    if (IsMarked(prototype) && IsMarked(cached_map)) {
      ASSERT(!prototype->IsUndefined());
      int proto_index = proto_offset + new_number_of_transitions * step;
      int map_index = map_offset + new_number_of_transitions * step;
      if (new_number_of_transitions != i) {
        prototype_transitions->set(
            proto_index,
            prototype,
            UPDATE_WRITE_BARRIER);
        prototype_transitions->set(
            map_index,
            cached_map,
            SKIP_WRITE_BARRIER);
      }
      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
      RecordSlot(slot, slot, prototype);
      new_number_of_transitions++;
    }
  }

  if (new_number_of_transitions != number_of_transitions) {
    map->SetNumberOfProtoTransitions(new_number_of_transitions);
  }

  // Fill slots that became free with undefined value.
  for (int i = new_number_of_transitions * step;
       i < number_of_transitions * step;
       i++) {
    prototype_transitions->set_undefined(header + i);
  }
}
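

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the pruning above is an in-place
// compaction.  One cursor reads every entry, a second writes back only the
// live ones, and the freed tail is cleared.  Restated over a plain int array
// where negative values stand for dead entries; all names are invented.
static inline int CompactLiveEntriesSketch(int* entries, int count) {
  int live = 0;
  for (int i = 0; i < count; i++) {
    if (entries[i] >= 0) {    // cf. IsMarked(prototype) && IsMarked(cached_map)
      if (live != i) entries[live] = entries[i];
      live++;
    }
  }
  for (int i = live; i < count; i++) {
    entries[i] = -1;          // cf. set_undefined() on the freed slots
  }
  return live;                // cf. SetNumberOfProtoTransitions()
}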


void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                      MarkBit map_mark) {
  Object* potential_parent = map->GetBackPointer();
  if (!potential_parent->IsMap()) return;
  Map* parent = Map::cast(potential_parent);

  // Follow the back pointer to check whether we are dealing with a map
  // transition from a live map to a dead path and, if so, clear the
  // transitions of the parent.
  bool current_is_alive = map_mark.Get();
  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
  if (!current_is_alive && parent_is_alive) {
    parent->ClearNonLiveTransitions(heap());
  }
}


void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
    DependentCode* entries) {
  DisallowHeapAllocation no_allocation;
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  for (int i = 0; i < number_of_entries; i++) {
    // If the entry is compilation info then the map must be alive,
    // and ClearAndDeoptimizeDependentCode shouldn't be called.
    ASSERT(entries->is_code_at(i));
    Code* code = entries->code_at(i);

    if (IsMarked(code) && !code->marked_for_deoptimization()) {
      code->set_marked_for_deoptimization(true);
      code->InvalidateEmbeddedObjects();
      have_code_to_deoptimize_ = true;
    }
    entries->clear_at(i);
  }
}


void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
  DisallowHeapAllocation no_allocation;
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  int new_number_of_entries = 0;
  // Go through all groups, remove dead code entries and compact.
  for (int g = 0; g < DependentCode::kGroupCount; g++) {
    int group_number_of_entries = 0;
    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
      Object* obj = entries->object_at(i);
      ASSERT(obj->IsCode() || IsMarked(obj));
      if (IsMarked(obj) &&
          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
        if (new_number_of_entries + group_number_of_entries != i) {
          entries->set_object_at(
              new_number_of_entries + group_number_of_entries, obj);
        }
        Object** slot = entries->slot_at(new_number_of_entries +
                                         group_number_of_entries);
        RecordSlot(slot, slot, obj);
        group_number_of_entries++;
      }
    }
    entries->set_number_of_entries(
        static_cast<DependentCode::DependencyGroup>(g),
        group_number_of_entries);
    new_number_of_entries += group_number_of_entries;
  }
  for (int i = new_number_of_entries; i < number_of_entries; i++) {
    entries->clear_at(i);
  }
}


void MarkCompactCollector::ProcessWeakCollections() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
  Object* weak_collection_obj = encountered_weak_collections();
  while (weak_collection_obj != Smi::FromInt(0)) {
    ASSERT(MarkCompactCollector::IsMarked(
        HeapObject::cast(weak_collection_obj)));
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
    Object** anchor = reinterpret_cast<Object**>(table->address());
    for (int i = 0; i < table->Capacity(); i++) {
      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
        Object** key_slot =
            table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
        RecordSlot(anchor, key_slot, *key_slot);
        Object** value_slot =
            table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
        MarkCompactMarkingVisitor::MarkObjectByPointer(
            this, anchor, value_slot);
      }
    }
    weak_collection_obj = weak_collection->next();
  }
}


void MarkCompactCollector::ClearWeakCollections() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
  Object* weak_collection_obj = encountered_weak_collections();
  while (weak_collection_obj != Smi::FromInt(0)) {
    ASSERT(MarkCompactCollector::IsMarked(
        HeapObject::cast(weak_collection_obj)));
    JSWeakCollection* weak_collection =
        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
    ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
    for (int i = 0; i < table->Capacity(); i++) {
      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
        table->RemoveEntry(i);
      }
    }
    weak_collection_obj = weak_collection->next();
    weak_collection->set_next(Smi::FromInt(0));
  }
  set_encountered_weak_collections(Smi::FromInt(0));
}
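

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: clearing is the destructive half of
// the ephemeron treatment.  Any entry whose key did not survive marking is
// deleted outright, value included, even if the value object stays live
// through other paths.  The parallel arrays and names below are invented;
// the real code removes hash-table entries via table->RemoveEntry(i).
static inline int ClearDeadEntriesSketch(const bool* key_is_live,
                                         bool* entry_present,
                                         int capacity) {
  int removed = 0;
  for (int i = 0; i < capacity; i++) {
    if (entry_present[i] && !key_is_live[i]) {
      entry_present[i] = false;  // cf. table->RemoveEntry(i)
      removed++;
    }
  }
  return removed;
}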


// We scavenge new space simultaneously with sweeping.  This is done in two
// passes.
//
// The first pass migrates all alive objects from one semispace to another or
// promotes them to old space.  The forwarding address is written directly
// into the first word of the object without any encoding.  If an object is
// dead we write NULL as its forwarding address.
//
// The second pass updates pointers to new space in all spaces.  It is
// possible to encounter pointers to dead new space objects during traversal
// of pointers to new space.  We should clear them to avoid encountering them
// during the next pointer iteration.  This is an issue if the store buffer
// overflows and we have to scan the entire old space, including dead objects,
// looking for pointers to new space.
void MarkCompactCollector::MigrateObject(HeapObject* dst,
                                         HeapObject* src,
                                         int size,
                                         AllocationSpace dest) {
  Address dst_addr = dst->address();
  Address src_addr = src->address();
  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
  if (heap_profiler->is_tracking_object_moves()) {
    heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
  }
  ASSERT(heap()->AllowedToBeMigrated(src, dest));
  ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
  if (dest == OLD_POINTER_SPACE) {
    Address src_slot = src_addr;
    Address dst_slot = dst_addr;
    ASSERT(IsAligned(size, kPointerSize));

    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
      Object* value = Memory::Object_at(src_slot);

      Memory::Object_at(dst_slot) = value;

      if (heap_->InNewSpace(value)) {
        heap_->store_buffer()->Mark(dst_slot);
      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           reinterpret_cast<Object**>(dst_slot),
                           SlotsBuffer::IGNORE_OVERFLOW);
      }

      src_slot += kPointerSize;
      dst_slot += kPointerSize;
    }

    if (compacting_ && dst->IsJSFunction()) {
      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
      Address code_entry = Memory::Address_at(code_entry_slot);

      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           SlotsBuffer::CODE_ENTRY_SLOT,
                           code_entry_slot,
                           SlotsBuffer::IGNORE_OVERFLOW);
      }
    } else if (compacting_ && dst->IsConstantPoolArray()) {
      ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
      for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
        Address code_entry_slot =
            dst_addr + constant_pool->OffsetOfElementAt(i);
        Address code_entry = Memory::Address_at(code_entry_slot);

        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
          SlotsBuffer::AddTo(&slots_buffer_allocator_,
                             &migration_slots_buffer_,
                             SlotsBuffer::CODE_ENTRY_SLOT,
                             code_entry_slot,
                             SlotsBuffer::IGNORE_OVERFLOW);
        }
      }
    }
  } else if (dest == CODE_SPACE) {
    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
    heap()->MoveBlock(dst_addr, src_addr, size);
    SlotsBuffer::AddTo(&slots_buffer_allocator_,
                       &migration_slots_buffer_,
                       SlotsBuffer::RELOCATED_CODE_OBJECT,
                       dst_addr,
                       SlotsBuffer::IGNORE_OVERFLOW);
    Code::cast(dst)->Relocate(dst_addr - src_addr);
  } else {
    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
    heap()->MoveBlock(dst_addr, src_addr, size);
  }
  Memory::Address_at(src_addr) = dst_addr;
}
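

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the migration protocol above.  After
// the copy, the first word of the old object is overwritten with the new
// address, so any later reader of a stale pointer can recover the object's
// new location.  The Obj type below is invented; V8 overlays the forwarding
// address on the map word instead of using a dedicated field.
namespace forwarding_sketch {

struct Obj {
  Obj* forwarding;  // first word; NULL until the object has been moved
  int payload;
};

static void Migrate(Obj* src, Obj* dst) {
  dst->forwarding = NULL;
  dst->payload = src->payload;  // cf. heap()->MoveBlock(dst_addr, src_addr, size)
  src->forwarding = dst;        // cf. Memory::Address_at(src_addr) = dst_addr
}

static Obj* Resolve(Obj* maybe_stale) {
  // cf. MapWord::IsForwardingAddress() / MapWord::ToForwardingAddress()
  return maybe_stale->forwarding != NULL ? maybe_stale->forwarding
                                         : maybe_stale;
}

}  // namespace forwarding_sketch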


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor: public ObjectVisitor {
 public:
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    Object* old_target = target;
    VisitPointer(&target);
    // Avoid unnecessary changes that might unnecessarily flush the
    // instruction cache.
    if (target != old_target) {
      rinfo->set_target_object(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    Object* old_target = target;
    VisitPointer(&target);
    if (target != old_target) {
      rinfo->set_target_address(Code::cast(target)->instruction_start());
    }
  }

  void VisitCodeAgeSequence(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    Object* stub = rinfo->code_age_stub();
    ASSERT(stub != NULL);
    VisitPointer(&stub);
    if (stub != rinfo->code_age_stub()) {
      rinfo->set_code_age_stub(Code::cast(stub));
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }

  static inline void UpdateSlot(Heap* heap, Object** slot) {
    Object* obj = *slot;

    if (!obj->IsHeapObject()) return;

    HeapObject* heap_obj = HeapObject::cast(obj);

    MapWord map_word = heap_obj->map_word();
    if (map_word.IsForwardingAddress()) {
      ASSERT(heap->InFromSpace(heap_obj) ||
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
      HeapObject* target = map_word.ToForwardingAddress();
      *slot = target;
      ASSERT(!heap->InFromSpace(target) &&
             !MarkCompactCollector::IsOnEvacuationCandidate(target));
    }
  }

 private:
  inline void UpdatePointer(Object** p) {
    UpdateSlot(heap_, p);
  }

  Heap* heap_;
};


static void UpdatePointer(HeapObject** p, HeapObject* object) {
  ASSERT(*p == object);

  Address old_addr = object->address();

  Address new_addr = Memory::Address_at(old_addr);

  // The new space sweep will overwrite the map word of dead objects
  // with NULL.  In this case we do not need to transfer this entry to
  // the store buffer which we are rebuilding.
  if (new_addr != NULL) {
    *p = HeapObject::FromAddress(new_addr);
  } else {
    // We have to zap this pointer, because the store buffer may overflow
    // later, and then we have to scan the entire heap and we don't want to
    // find spurious newspace pointers in the old space.
    // TODO(mstarzinger): This was changed to a sentinel value to track down
    // rare crashes, change it back to Smi::FromInt(0) later.
    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
  }
}


static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         Object** p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    return String::cast(map_word.ToForwardingAddress());
  }

  return String::cast(*p);
}


bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
                                            int object_size) {
  // TODO(hpayer): Replace that check with an assert.
  CHECK(object_size <= Page::kMaxRegularHeapObjectSize);

  OldSpace* target_space = heap()->TargetSpace(object);

  ASSERT(target_space == heap()->old_pointer_space() ||
         target_space == heap()->old_data_space());
  Object* result;
  MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
  if (maybe_result->ToObject(&result)) {
    HeapObject* target = HeapObject::cast(result);
    MigrateObject(target,
                  object,
                  object_size,
                  target_space->identity());
    heap()->mark_compact_collector()->tracer()->
        increment_promoted_objects_size(object_size);
    return true;
  }

  return false;
}


void MarkCompactCollector::EvacuateNewSpace() {
  // There are soft limits in the allocation code, designed to trigger a
  // mark-sweep collection by failing allocations.  But since we are already
  // in a mark-sweep allocation, there is no sense in trying to trigger one.
  AlwaysAllocateScope scope(isolate());
  heap()->CheckNewSpaceExpansionCriteria();

  NewSpace* new_space = heap()->new_space();

  // Store allocation range before flipping semispaces.
  Address from_bottom = new_space->bottom();
  Address from_top = new_space->top();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  new_space->Flip();
  new_space->ResetAllocationInfo();

  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.  This stage puts
  // new entries in the store buffer and may cause some pages to be marked
  // scan-on-scavenge.
  NewSpacePageIterator it(from_bottom, from_top);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
  }

  heap_->IncrementYoungSurvivorsCounter(survivors_size);
  new_space->set_age_mark(new_space->top());
}


void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
  AlwaysAllocateScope always_allocate(isolate());
  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
  p->MarkSweptPrecisely();

  int offsets[16];

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();

    if (*cell == 0) continue;

    int live_objects = MarkWordToObjectStarts(*cell, offsets);
    for (int i = 0; i < live_objects; i++) {
      Address object_addr = cell_base + offsets[i] * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(object_addr);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      int size = object->Size();

      MaybeObject* target = space->AllocateRaw(size);
      if (target->IsFailure()) {
        // OS refused to give us memory.
        V8::FatalProcessOutOfMemory("Evacuation");
        return;
      }

      Object* target_object = target->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target_object),
                    object,
                    size,
                    space->identity());
      ASSERT(object->map_word().IsForwardingAddress());
    }

    // Clear marking bits for current cell.
    *cell = 0;
  }
  p->ResetLiveBytes();
}


void MarkCompactCollector::EvacuatePages() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    // TODO(hpayer): This check is just used for debugging purpose and
    // should be removed or turned into an assert after investigating the
    // crash in concurrent sweeping.
    CHECK(p->IsEvacuationCandidate() ||
          p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
    CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
    if (p->IsEvacuationCandidate()) {
      // During compaction we might have to request a new page.
      // Check that the space still has room for that.
      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
        EvacuateLiveObjectsFromPage(p);
      } else {
        // Without room for expansion evacuation is not guaranteed to succeed.
        // Pessimistically abandon unevacuated pages.
        for (int j = i; j < npages; j++) {
          Page* page = evacuation_candidates_[j];
          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
          page->ClearEvacuationCandidate();
          page->SetFlag(Page::RESCAN_ON_EVACUATION);
          page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
        }
        return;
      }
    }
  }
}


class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};


static inline void UpdateSlot(Isolate* isolate,
                              ObjectVisitor* v,
                              SlotsBuffer::SlotType slot_type,
                              Address addr) {
  switch (slot_type) {
    case SlotsBuffer::CODE_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::cast(obj)->CodeIterateBody(v);
      break;
    }
    case SlotsBuffer::DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::JS_RETURN_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
      break;
    }
    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


enum SweepingMode {
  SWEEP_ONLY,
  SWEEP_AND_VISIT_LIVE_OBJECTS
};


enum SkipListRebuildingMode {
  REBUILD_SKIP_LIST,
  IGNORE_SKIP_LIST
};


enum FreeSpaceTreatmentMode {
  IGNORE_FREE_SPACE,
  ZAP_FREE_SPACE
};


// Sweep a space precisely.  After this has been done the space can
// be iterated precisely, hitting only the live objects.  Code space
// is always swept precisely because we want to be able to iterate
// over it.  Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
template<SweepingMode sweeping_mode,
         SkipListRebuildingMode skip_list_mode,
         FreeSpaceTreatmentMode free_space_mode>
static void SweepPrecisely(PagedSpace* space,
                           Page* p,
                           ObjectVisitor* v) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
            space->identity() == CODE_SPACE);
  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));

  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = OS::TimeCurrentMillis();
  }

  p->MarkSweptPrecisely();

  Address free_start = p->area_start();
  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
  int offsets[16];

  SkipList* skip_list = p->skip_list();
  int curr_region = -1;
  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
    skip_list->Clear();
  }

  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
    Address cell_base = it.CurrentCellBase();
    MarkBit::CellType* cell = it.CurrentCell();
    int live_objects = MarkWordToObjectStarts(*cell, offsets);
    int live_index = 0;
    for ( ; live_objects != 0; live_objects--) {
      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
      if (free_end != free_start) {
        if (free_space_mode == ZAP_FREE_SPACE) {
          memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
        }
        space->Free(free_start, static_cast<int>(free_end - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
          GDBJITInterface::RemoveCodeRange(free_start, free_end);
        }
#endif
      }
      HeapObject* live_object = HeapObject::FromAddress(free_end);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
      Map* map = live_object->map();
      int size = live_object->SizeFromMap(map);
      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
        live_object->IterateBody(map->instance_type(), size, v);
      }
      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
        int new_region_start =
            SkipList::RegionNumber(free_end);
        int new_region_end =
            SkipList::RegionNumber(free_end + size - kPointerSize);
        if (new_region_start != curr_region ||
            new_region_end != curr_region) {
          skip_list->AddObject(free_end, size);
          curr_region = new_region_end;
        }
      }
      free_start = free_end + size;
    }
    // Clear marking bits for current cell.
    *cell = 0;
  }
  if (free_start != p->area_end()) {
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
    }
    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
#ifdef ENABLE_GDB_JIT_INTERFACE
    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
    }
#endif
  }
  p->ResetLiveBytes();
  if (FLAG_print_cumulative_gc_stat) {
    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
  }
}
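

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: the core shape of SweepPrecisely.
// Walk the live objects of a page in address order and return every gap
// between the end of one live object and the start of the next to the free
// list; a trailing gap runs to the end of the page.  The arrays and the
// free_range callback are invented for this sketch; the real code derives
// object starts from mark-bit cells via MarkWordToObjectStarts.
static inline void SweepGapsSketch(const int* live_start,
                                   const int* live_size,
                                   int live_count,
                                   int area_start,
                                   int area_end,
                                   void (*free_range)(int start, int end)) {
  int free_start = area_start;
  for (int i = 0; i < live_count; i++) {
    if (live_start[i] != free_start) {
      free_range(free_start, live_start[i]);  // cf. space->Free(...)
    }
    free_start = live_start[i] + live_size[i];
  }
  if (free_start != area_end) {
    free_range(free_start, area_end);         // trailing free space
  }
}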


static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
  Page* p = Page::FromAddress(code->address());

  if (p->IsEvacuationCandidate() ||
      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
    return false;
  }

  Address code_start = code->address();
  Address code_end = code_start + code->Size();

  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
  uint32_t end_index =
      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);

  Bitmap* b = p->markbits();

  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);

  MarkBit::CellType* start_cell = start_mark_bit.cell();
  MarkBit::CellType* end_cell = end_mark_bit.cell();

  if (value) {
    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;

    if (start_cell == end_cell) {
      *start_cell |= start_mask & end_mask;
    } else {
      *start_cell |= start_mask;
      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
        *cell = ~0;
      }
      *end_cell |= end_mask;
    }
  } else {
    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
      *cell = 0;
    }
  }

  return true;
}
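

// ---------------------------------------------------------------------------
// Illustrative sketch, not part of V8: a worked example of the mask
// arithmetic above for a single 32-bit cell.  With the start bit at index 3
// and the end bit at index 6:
//
//   start mask: ~(0000'1000 - 1) = ~0000'0111 = ...1111'1000  (bits >= 3)
//   end mask:   (0100'0000 << 1) - 1 =          0111'1111     (bits <= 6)
//   AND:                                        0111'1000     (bits 3..6)
static inline uint32_t BitsInRangeSketch(int start_bit, int end_bit) {
  uint32_t start_mask = ~((1u << start_bit) - 1);  // bits >= start_bit
  uint32_t end_mask = (1u << end_bit << 1) - 1;    // bits <= end_bit
  return start_mask & end_mask;
}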


static bool IsOnInvalidatedCodeObject(Address addr) {
  // We did not record any slots in large objects thus
  // we can safely go to the page from the slot address.
  Page* p = Page::FromAddress(addr);

  // First check owner's identity because old pointer and old data spaces
  // are swept lazily and might still have non-zero mark-bits on some
  // pages.
  if (p->owner()->identity() != CODE_SPACE) return false;

  // In code space only bits on evacuation candidates (but we don't record
  // any slots on them) and under invalidated code objects are non-zero.
  MarkBit mark_bit =
      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));

  return mark_bit.Get();
}


void MarkCompactCollector::InvalidateCode(Code* code) {
  if (heap_->incremental_marking()->IsCompacting() &&
      !ShouldSkipEvacuationSlotRecording(code)) {
    ASSERT(compacting_);

    // If the object is white then no slots were recorded on it yet.
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    invalidated_code_.Add(code);
  }
}


// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
  return code->marked_for_deoptimization();
}


bool MarkCompactCollector::MarkInvalidatedCode() {
  bool code_marked = false;

  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];

    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
      code_marked = true;
    }
  }

  return code_marked;
}


void MarkCompactCollector::RemoveDeadInvalidatedCode() {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
  }
}


void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];
    if (code != NULL) {
      code->Iterate(visitor);
      SetMarkBitsUnderInvalidatedCode(code, false);
    }
  }
  invalidated_code_.Rewind(0);
}


void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();
    EvacuateNewSpace();
  }

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
    EvacuatePages();
  }

  // Second pass: find pointers to new space and update them.
  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    // Update pointers in to space.
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &updating_visitor);
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  heap_->store_buffer(),
                                  &Heap::ScavengeStoreBufferCallback);
    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
        &UpdatePointer);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",
             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
    }

    if (compacting_ && was_marked_incrementally_) {
      // It's difficult to filter out slots recorded for large objects.
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        // LargeObjectSpace is not swept yet thus we have to skip
        // dead objects explicitly.
        if (!IsMarked(obj)) continue;

        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          obj->Iterate(&updating_visitor);
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

      if (p->IsEvacuationCandidate()) {
        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                           p->slots_buffer(),
                                           code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }

        // Important: skip list should be cleared only after roots were updated
        // because root iteration traverses the stack and might have to find
        // code objects from non-updated pc pointing into evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

        switch (space->identity()) {
          case OLD_DATA_SPACE:
            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
            break;
          case OLD_POINTER_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
                           IGNORE_SKIP_LIST,
                           IGNORE_FREE_SPACE>(
                space, p, &updating_visitor);
            break;
          case CODE_SPACE:
            if (FLAG_zap_code_space) {
              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
                             REBUILD_SKIP_LIST,
                             ZAP_FREE_SPACE>(
                  space, p, &updating_visitor);
            } else {
              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
                             REBUILD_SKIP_LIST,
                             IGNORE_FREE_SPACE>(
                  space, p, &updating_visitor);
            }
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(heap_->cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (cell->IsCell()) {
      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  HeapObjectIterator js_global_property_cell_iterator(
      heap_->property_cell_space());
  for (HeapObject* cell = js_global_property_cell_iterator.Next();
       cell != NULL;
       cell = js_global_property_cell_iterator.Next()) {
    if (cell->IsPropertyCell()) {
      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  // Update the head of the native contexts list in the heap.
  updating_visitor.VisitPointer(heap_->native_contexts_list_address());

  heap_->string_table()->Iterate(&updating_visitor);
  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
  if (heap_->weak_object_to_code_table()->IsHashTable()) {
    WeakHashTable* table =
        WeakHashTable::cast(heap_->weak_object_to_code_table());
    table->Iterate(&updating_visitor);
    table->Rehash(heap_->undefined_value());
  }

  // Update pointers from external string table.
  heap_->UpdateReferencesInExternalStringTable(
      &UpdateReferenceInExternalStringTableEntry);

  EvacuationWeakObjectRetainer evacuation_object_retainer;
  heap()->ProcessWeakReferences(&evacuation_object_retainer);

  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
  ProcessInvalidatedCode(&updating_visitor);

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }
#endif

  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  ASSERT(migration_slots_buffer_ == NULL);
}


void MarkCompactCollector::UnlinkEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    p->Unlink();
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();
  }
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->area_start(), p->area_size());
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ResetLiveBytes();
    space->ReleasePage(p, false);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->FreeQueuedChunks();
}




static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte.  The first number on
// the line describes the number of live object starts for the line and the
// other numbers on the line describe the offsets (in words) of the object
// starts.
//
// Since objects are at least 2 words large we don't have entries for two
// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
  0, _, _, _, _,  // 0
  1, 0, _, _, _,  // 1
  1, 1, _, _, _,  // 2
  X, _, _, _, _,  // 3
  1, 2, _, _, _,  // 4
  2, 0, 2, _, _,  // 5
  X, _, _, _, _,  // 6
  X, _, _, _, _,  // 7
  1, 3, _, _, _,  // 8
  2, 0, 3, _, _,  // 9
  2, 1, 3, _, _,  // 10
  X, _, _, _, _,  // 11
  X, _, _, _, _,  // 12
  X, _, _, _, _,  // 13
  X, _, _, _, _,  // 14
  X, _, _, _, _,  // 15
  1, 4, _, _, _,  // 16
  2, 0, 4, _, _,  // 17
  2, 1, 4, _, _,  // 18
  X, _, _, _, _,  // 19
  2, 2, 4, _, _,  // 20
  3, 0, 2, 4, _,  // 21
  X, _, _, _, _,  // 22
  X, _, _, _, _,  // 23
  X, _, _, _, _,  // 24
  X, _, _, _, _,  // 25
  X, _, _, _, _,  // 26
  X, _, _, _, _,  // 27
  X, _, _, _, _,  // 28
  X, _, _, _, _,  // 29
  X, _, _, _, _,  // 30
  X, _, _, _, _,  // 31
  1, 5, _, _, _,  // 32
  2, 0, 5, _, _,  // 33
  2, 1, 5, _, _,  // 34
  X, _, _, _, _,  // 35
  2, 2, 5, _, _,  // 36
  3, 0, 2, 5, _,  // 37
  X, _, _, _, _,  // 38
  X, _, _, _, _,  // 39
  2, 3, 5, _, _,  // 40
  3, 0, 3, 5, _,  // 41
  3, 1, 3, 5, _,  // 42
  X, _, _, _, _,  // 43
  X, _, _, _, _,  // 44
  X, _, _, _, _,  // 45
  X, _, _, _, _,  // 46
  X, _, _, _, _,  // 47
  X, _, _, _, _,  // 48
  X, _, _, _, _,  // 49
  X, _, _, _, _,  // 50
  X, _, _, _, _,  // 51
  X, _, _, _, _,  // 52
  X, _, _, _, _,  // 53
  X, _, _, _, _,  // 54
  X, _, _, _, _,  // 55
  X, _, _, _, _,  // 56
  X, _, _, _, _,  // 57
  X, _, _, _, _,  // 58
  X, _, _, _, _,  // 59
  X, _, _, _, _,  // 60
  X, _, _, _, _,  // 61
  X, _, _, _, _,  // 62
  X, _, _, _, _,  // 63
  1, 6, _, _, _,  // 64
  2, 0, 6, _, _,  // 65
  2, 1, 6, _, _,  // 66
  X, _, _, _, _,  // 67
  2, 2, 6, _, _,  // 68
  3, 0, 2, 6, _,  // 69
  X, _, _, _, _,  // 70
  X, _, _, _, _,  // 71
  2, 3, 6, _, _,  // 72
  3, 0, 3, 6, _,  // 73
  3, 1, 3, 6, _,  // 74
  X, _, _, _, _,  // 75
  X, _, _, _, _,  // 76
  X, _, _, _, _,  // 77
  X, _, _, _, _,  // 78
  X, _, _, _, _,  // 79
  2, 4, 6, _, _,  // 80
  3, 0, 4, 6, _,  // 81
  3, 1, 4, 6, _,  // 82
  X, _, _, _, _,  // 83
  3, 2, 4, 6, _,  // 84
  4, 0, 2, 4, 6,  // 85
  X, _, _, _, _,  // 86
  X, _, _, _, _,  // 87
  X, _, _, _, _,  // 88
  X, _, _, _, _,  // 89
  X, _, _, _, _,  // 90
  X, _, _, _, _,  // 91
  X, _, _, _, _,  // 92
  X, _, _, _, _,  // 93
  X, _, _, _, _,  // 94
  X, _, _, _, _,  // 95
  X, _, _, _, _,  // 96
  X, _, _, _, _,  // 97
  X, _, _, _, _,  // 98
  X, _, _, _, _,  // 99
  X, _, _, _, _,  // 100
  X, _, _, _, _,  // 101
  X, _, _, _, _,  // 102
  X, _, _, _, _,  // 103
  X, _, _, _, _,  // 104
  X, _, _, _, _,  // 105
  X, _, _, _, _,  // 106
  X, _, _, _, _,  // 107
  X, _, _, _, _,  // 108
  X, _, _, _, _,  // 109
  X, _, _, _, _,  // 110
  X, _, _, _, _,  // 111
  X, _, _, _, _,  // 112
  X, _, _, _, _,  // 113
  X, _, _, _, _,  // 114
  X, _, _, _, _,  // 115
  X, _, _, _, _,  // 116
  X, _, _, _, _,  // 117
  X, _, _, _, _,  // 118
  X, _, _, _, _,  // 119
  X, _, _, _, _,  // 120
  X, _, _, _, _,  // 121
  X, _, _, _, _,  // 122
  X, _, _, _, _,  // 123
  X, _, _, _, _,  // 124
  X, _, _, _, _,  // 125
  X, _, _, _, _,  // 126
  X, _, _, _, _,  // 127
  1, 7, _, _, _,  // 128
  2, 0, 7, _, _,  // 129
  2, 1, 7, _, _,  // 130
  X, _, _, _, _,  // 131
  2, 2, 7, _, _,  // 132
  3, 0, 2, 7, _,  // 133
  X, _, _, _, _,  // 134
  X, _, _, _, _,  // 135
  2, 3, 7, _, _,  // 136
  3, 0, 3, 7, _,  // 137
  3, 1, 3, 7, _,  // 138
  X, _, _, _, _,  // 139
  X, _, _, _, _,  // 140
  X, _, _, _, _,  // 141
  X, _, _, _, _,  // 142
  X, _, _, _, _,  // 143
  2, 4, 7, _, _,  // 144
  3, 0, 4, 7, _,  // 145
  3, 1, 4, 7, _,  // 146
  X, _, _, _, _,  // 147
  3, 2, 4, 7, _,  // 148
  4, 0, 2, 4, 7,  // 149
  X, _, _, _, _,  // 150
  X, _, _, _, _,  // 151
  X, _, _, _, _,  // 152
  X, _, _, _, _,  // 153
  X, _, _, _, _,  // 154
  X, _, _, _, _,  // 155
  X, _, _, _, _,  // 156
  X, _, _, _, _,  // 157
  X, _, _, _, _,  // 158
  X, _, _, _, _,  // 159
  2, 5, 7, _, _,  // 160
  3, 0, 5, 7, _,  // 161
  3, 1, 5, 7, _,  // 162
  X, _, _, _, _,  // 163
  3, 2, 5, 7, _,  // 164
  4, 0, 2, 5, 7,  // 165
  X, _, _, _, _,  // 166
  X, _, _, _, _,  // 167
  3, 3, 5, 7, _,  // 168
  4, 0, 3, 5, 7,  // 169
  4, 1, 3, 5, 7   // 170
};
#undef _
#undef X
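
// Example of reading the table: the mark-bit byte 0x12 (0b00010010, bits 1
// and 4 set) selects line 18 above, "2, 1, 4": two live objects start in
// these 8 words, at word offsets 1 and 4.  Line 3 (0b00000011) is X because
// two adjacent 1 bits cannot both be object starts when every object is at
// least 2 words long.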


// Takes a word of mark bits.  Returns the number of objects that start in the
// range.  Puts the offsets of the words in the supplied array.
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }

  return objects;
}
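
// Example: for mark_bits == 0x00010012 the low byte 0x12 selects table line
// 18 ("2, 1, 4"), contributing starts at word offsets 1 and 4; the third
// byte 0x01 selects line 1 ("1, 0"), contributing offset 16 + 0.  The
// function fills starts[] with {1, 4, 16} and returns 3.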


static inline Address DigestFreeStart(Address approximate_free_start,
                                      uint32_t free_start_cell) {
  ASSERT(free_start_cell != 0);

  // No consecutive 1 bits.
  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);

  int offsets[16];
  uint32_t cell = free_start_cell;
  int offset_of_last_live;
  if ((cell & 0x80000000u) != 0) {
    // This case would overflow below.
    offset_of_last_live = 31;
  } else {
    // Remove all but one bit, the most significant.  This is an optimization
    // that may or may not be worthwhile.
    cell |= cell >> 16;
    cell |= cell >> 8;
    cell |= cell >> 4;
    cell |= cell >> 2;
    cell |= cell >> 1;
    cell = (cell + 1) >> 1;
    int live_objects = MarkWordToObjectStarts(cell, offsets);
    ASSERT(live_objects == 1);
    offset_of_last_live = offsets[live_objects - 1];
  }
  Address last_live_start =
      approximate_free_start + offset_of_last_live * kPointerSize;
  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
  Address free_start = last_live_start + last_live->Size();
  return free_start;
}
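
// Example: for free_start_cell == 0x9 (object starts at word offsets 0 and
// 3) the smearing shifts turn the cell into 0xF, and (0xF + 1) >> 1 == 0x8
// keeps only the most significant bit.  MarkWordToObjectStarts then reports
// a single start at offset 3, so the free area begins where the object
// starting 3 words past approximate_free_start ends.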


static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}
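
// Example: for cell == 0x28 (bits 3 and 5 set), cell ^ (cell - 1) == 0xF,
// so first_set_bit == (0xF + 1) >> 1 == 0x8, i.e. the lowest set bit.  The
// first live object in the block therefore starts 3 words past
// block_address.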


template<MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space,
                     FreeList* free_list,
                     Address start,
                     int size) {
  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
    return space->Free(start, size);
  } else {
    return size - free_list->Free(start, size);
  }
}
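
// Both branches return the net number of bytes made available for
// allocation: in SWEEP_SEQUENTIALLY mode the block goes straight onto the
// space's own free list, while in SWEEP_IN_PARALLEL mode it goes onto the
// caller-supplied (thread-private) free list.  The "size - ..." form relies
// on FreeList::Free() returning the bytes it could not make use of.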


// Force instantiation of templatized SweepConservatively method for
// SWEEP_SEQUENTIALLY mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
        PagedSpace*, FreeList*, Page*);


// Force instantiation of templatized SweepConservatively method for
// SWEEP_IN_PARALLEL mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
        PagedSpace*, FreeList*, Page*);


// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
template<MarkCompactCollector::SweepingParallelism mode>
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                                   FreeList* free_list,
                                                   Page* p) {
  // TODO(hpayer): This check is just used for debugging purpose and
  // should be removed or turned into an assert after investigating the
  // crash in concurrent sweeping.
  CHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
          free_list != NULL) ||
         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
          free_list == NULL));

  // When parallel sweeping is active, the page will be marked after
  // sweeping by the main thread.
  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
    p->MarkSweptConservatively();
  }

  intptr_t freed_bytes = 0;
  size_t size = 0;

  // Skip over all the dead objects at the start of the page and mark them
  // free.
  Address cell_base = 0;
  MarkBit::CellType* cell = NULL;
  MarkBitCellIterator it(p);
  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) break;
  }

  if (it.Done()) {
    size = p->area_end() - p->area_start();
    freed_bytes += Free<mode>(space, free_list, p->area_start(),
                              static_cast<int>(size));
    ASSERT_EQ(0, p->LiveBytes());
    return freed_bytes;
  }

  // Grow the size of the start-of-page free space a little to get up to the
  // first live object.
  Address free_end = StartOfLiveObject(cell_base, *cell);
  // Free the first free space.
  size = free_end - p->area_start();
  freed_bytes += Free<mode>(space, free_list, p->area_start(),
                            static_cast<int>(size));

  // The start of the current free area is represented in undigested form by
  // the address of the last 32-word section that contained a live object and
  // the marking bitmap for that cell, which describes where the live object
  // started.  Unless we find a large free space in the bitmap we will not
  // digest this pair into a real address.  We start the iteration here at the
  // first word in the marking bit map that indicates a live object.
  Address free_start = cell_base;
  MarkBit::CellType free_start_cell = *cell;

  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) {
      // We have a live object.  Check approximately whether it is more than 32
      // words since the last live object.
      if (cell_base - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (cell_base - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space it still looks
          // like we have a large enough free space to be worth bothering with,
          // so now we need to find the start of the first live object at the
          // end of the free space.
          free_end = StartOfLiveObject(cell_base, *cell);
          freed_bytes += Free<mode>(space, free_list, free_start,
                                    static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = cell_base;
      free_start_cell = *cell;
      // Clear marking bits for current cell.
      *cell = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (cell_base - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += Free<mode>(space, free_list, free_start,
                              static_cast<int>(p->area_end() - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}
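
// Illustration of the conservative policy above: when two live cells are
// adjacent, cell_base - free_start equals exactly 32 * kPointerSize, so the
// gap between them is never freed; its mark bits are merely cleared.  Only
// when at least one entirely empty 32-word cell separates live objects is
// the exact free start digested and the block handed to Free<mode>().
// Smaller gaps stay as unswept garbage until a later collection.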


void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
  PageIterator it(space);
  FreeList* free_list = space == heap()->old_pointer_space()
                            ? free_list_old_pointer_space_.get()
                            : free_list_old_data_space_.get();
  FreeList private_free_list(space);
  while (it.has_next()) {
    Page* p = it.next();

    if (p->TryParallelSweeping()) {
      SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
      free_list->Concatenate(&private_free_list);
      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
    }
  }
}
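
// TryParallelSweeping() is assumed to atomically transition the page out of
// PARALLEL_SWEEPING_PENDING, so several sweeper threads may race over the
// same page list while each page is still swept exactly once.  Sweeping into
// private_free_list and concatenating afterwards limits contention on the
// shared free list to one Concatenate() per page.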


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE ||
                                      sweeper == PARALLEL_CONSERVATIVE ||
                                      sweeper == CONCURRENT_CONSERVATIVE);
  space->ClearStats();

  PageIterator it(space);

  int pages_swept = 0;
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;
  bool parallel_sweeping_active = false;

  while (it.has_next()) {
    Page* p = it.next();

    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
    ASSERT(!p->IsEvacuationCandidate());

    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p, true);
        continue;
      }
      unused_page_present = true;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (lazy_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          space->IncreaseUnsweptFreeBytes(p);
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
        }
        break;
      }
      case CONCURRENT_CONSERVATIVE:
      case PARALLEL_CONSERVATIVE: {
        if (!parallel_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          parallel_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
          space->IncreaseUnsweptFreeBytes(p);
        }
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
              space, p, NULL);
        } else if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
              space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
              space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif
  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (AreSweeperThreadsActivated()) {
    if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
    if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
  }
  if (sweep_precisely_) how_to_sweep = PRECISE;

  // Unlink evacuation candidates before sweeper threads access the list of
  // pages to avoid race condition.
  UnlinkEvacuationCandidates();

  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
    { SequentialSweepingScope scope(this);
      SweepSpace(heap()->old_pointer_space(), how_to_sweep);
      SweepSpace(heap()->old_data_space(), how_to_sweep);
    }

    if (how_to_sweep == PARALLEL_CONSERVATIVE ||
        how_to_sweep == CONCURRENT_CONSERVATIVE) {
      // TODO(hpayer): fix race with concurrent sweeper
      StartSweeperThreads();
    }

    if (how_to_sweep == PARALLEL_CONSERVATIVE) {
      WaitUntilSweepingCompleted();
    }
  }
  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);
  SweepSpace(heap()->property_cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  // Deallocate evacuated candidate pages.
  ReleaseEvacuationCandidates();
}


void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
      p->MarkSweptConservatively();
    }
    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
  }
}
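
// This completes the handshake with SweepInParallel(): sweeper threads leave
// finished pages in PARALLEL_SWEEPING_FINALIZE, and only the main thread
// advances them to PARALLEL_SWEEPING_DONE and sets the swept flag, so the
// flag is never written from a sweeper thread.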


void MarkCompactCollector::ParallelSweepSpacesComplete() {
  ParallelSweepSpaceComplete(heap()->old_pointer_space());
  ParallelSweepSpaceComplete(heap()->old_data_space());
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (isolate()->debug()->IsLoaded() ||
      isolate()->debug()->has_break_points()) {
    enable = false;
  }
#endif

  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(isolate());
  } else {
    if (code_flusher_ == NULL) return;
    code_flusher_->EvictAllCandidates();
    delete code_flusher_;
    code_flusher_ = NULL;
  }

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
  }
}


// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects.  We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


Isolate* MarkCompactCollector::isolate() const {
  return heap_->isolate();
}


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}


bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}
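
// This works because SlotType values are small integers
// (0 .. NUMBER_OF_SLOT_TYPES - 1), while untyped entries are real slot
// addresses inside heap pages and are never that small, so a type tag
// stored in the buffer cannot be confused with an ordinary slot pointer.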


bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address,
                        SlotType type,
                        Address addr,
                        AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  ASSERT(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}
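
// A typed slot therefore occupies two consecutive buffer entries: the
// SlotType tag followed by the raw address it describes.  UpdateSlots()
// below depends on exactly this layout when it bumps slot_idx past the tag
// to reach the recorded address.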


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  RelocInfo::Mode rmode = rinfo->rmode();
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    bool success;
    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
      // This doesn't need to be typed since it is just a normal heap pointer.
      Object** target_pointer =
          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   target_pointer,
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   SlotsBuffer::CODE_ENTRY_SLOT,
                                   rinfo->constant_pool_entry_address(),
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    } else {
      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
                                   target_page->slots_buffer_address(),
                                   SlotTypeForRMode(rmode),
                                   rinfo->pc(),
                                   SlotsBuffer::FAIL_ON_OVERFLOW);
    }
    if (!success) {
      EvictEvacuationCandidate(target_page);
    }
  }
}
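
// When AddTo() fails because the page's slots-buffer chain has reached its
// length threshold, the page is evicted from the set of evacuation
// candidates instead: it simply is not compacted in this cycle, which bounds
// the memory spent recording slots that point into any one page.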


void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host = isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      UpdateSlot(heap->isolate(),
                 &v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}


void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(heap->isolate(),
                   &v,
                   DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}

} }  // namespace v8::internal