1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "code-stubs.h"
31 #include "compilation-cache.h"
32 #include "cpu-profiler.h"
33 #include "deoptimizer.h"
34 #include "execution.h"
36 #include "global-handles.h"
37 #include "heap-profiler.h"
39 #include "incremental-marking.h"
40 #include "mark-compact.h"
41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
43 #include "stub-cache.h"
44 #include "sweeper-thread.h"
50 const char* Marking::kWhiteBitPattern = "00";
51 const char* Marking::kBlackBitPattern = "10";
52 const char* Marking::kGreyBitPattern = "11";
53 const char* Marking::kImpossibleBitPattern = "01";
56 // -------------------------------------------------------------------------
57 // MarkCompactCollector
59 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
63 sweep_precisely_(false),
64 reduce_memory_footprint_(false),
65 abort_incremental_marking_(false),
66 marking_parity_(ODD_MARKING_PARITY),
68 was_marked_incrementally_(false),
69 sweeping_pending_(false),
70 sequential_sweeping_(false),
72 migration_slots_buffer_(NULL),
75 encountered_weak_collections_(NULL),
76 have_code_to_deoptimize_(false) { }
79 class VerifyMarkingVisitor: public ObjectVisitor {
81 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
83 void VisitPointers(Object** start, Object** end) {
84 for (Object** current = start; current < end; current++) {
85 if ((*current)->IsHeapObject()) {
86 HeapObject* object = HeapObject::cast(*current);
87 CHECK(heap_->mark_compact_collector()->IsMarked(object));
92 void VisitEmbeddedPointer(RelocInfo* rinfo) {
93 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
94 if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
95 rinfo->target_object())) {
96 Object* p = rinfo->target_object();
101 void VisitCell(RelocInfo* rinfo) {
102 Code* code = rinfo->host();
103 ASSERT(rinfo->rmode() == RelocInfo::CELL);
104 if (!Code::IsWeakEmbeddedObject(code->kind(), rinfo->target_cell())) {
105 ObjectVisitor::VisitCell(rinfo);
114 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
115 VerifyMarkingVisitor visitor(heap);
117 Address next_object_must_be_here_or_later = bottom;
119 for (Address current = bottom;
121 current += kPointerSize) {
122 object = HeapObject::FromAddress(current);
123 if (MarkCompactCollector::IsMarked(object)) {
124 CHECK(current >= next_object_must_be_here_or_later);
125 object->Iterate(&visitor);
126 next_object_must_be_here_or_later = current + object->Size();
132 static void VerifyMarking(NewSpace* space) {
133 Address end = space->top();
134 NewSpacePageIterator it(space->bottom(), end);
135 // The bottom position is at the start of its page. Allows us to use
136 // page->area_start() as start of range on all pages.
137 CHECK_EQ(space->bottom(),
138 NewSpacePage::FromAddress(space->bottom())->area_start());
139 while (it.has_next()) {
140 NewSpacePage* page = it.next();
141 Address limit = it.has_next() ? page->area_end() : end;
142 CHECK(limit == end || !page->Contains(end));
143 VerifyMarking(space->heap(), page->area_start(), limit);
148 static void VerifyMarking(PagedSpace* space) {
149 PageIterator it(space);
151 while (it.has_next()) {
153 VerifyMarking(space->heap(), p->area_start(), p->area_end());
158 static void VerifyMarking(Heap* heap) {
159 VerifyMarking(heap->old_pointer_space());
160 VerifyMarking(heap->old_data_space());
161 VerifyMarking(heap->code_space());
162 VerifyMarking(heap->cell_space());
163 VerifyMarking(heap->property_cell_space());
164 VerifyMarking(heap->map_space());
165 VerifyMarking(heap->new_space());
167 VerifyMarkingVisitor visitor(heap);
169 LargeObjectIterator it(heap->lo_space());
170 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
171 if (MarkCompactCollector::IsMarked(obj)) {
172 obj->Iterate(&visitor);
176 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
180 class VerifyEvacuationVisitor: public ObjectVisitor {
182 void VisitPointers(Object** start, Object** end) {
183 for (Object** current = start; current < end; current++) {
184 if ((*current)->IsHeapObject()) {
185 HeapObject* object = HeapObject::cast(*current);
186 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
193 static void VerifyEvacuation(Address bottom, Address top) {
194 VerifyEvacuationVisitor visitor;
196 Address next_object_must_be_here_or_later = bottom;
198 for (Address current = bottom;
200 current += kPointerSize) {
201 object = HeapObject::FromAddress(current);
202 if (MarkCompactCollector::IsMarked(object)) {
203 CHECK(current >= next_object_must_be_here_or_later);
204 object->Iterate(&visitor);
205 next_object_must_be_here_or_later = current + object->Size();
211 static void VerifyEvacuation(NewSpace* space) {
212 NewSpacePageIterator it(space->bottom(), space->top());
213 VerifyEvacuationVisitor visitor;
215 while (it.has_next()) {
216 NewSpacePage* page = it.next();
217 Address current = page->area_start();
218 Address limit = it.has_next() ? page->area_end() : space->top();
219 CHECK(limit == space->top() || !page->Contains(space->top()));
220 while (current < limit) {
221 HeapObject* object = HeapObject::FromAddress(current);
222 object->Iterate(&visitor);
223 current += object->Size();
229 static void VerifyEvacuation(PagedSpace* space) {
230 PageIterator it(space);
232 while (it.has_next()) {
234 if (p->IsEvacuationCandidate()) continue;
235 VerifyEvacuation(p->area_start(), p->area_end());
240 static void VerifyEvacuation(Heap* heap) {
241 VerifyEvacuation(heap->old_pointer_space());
242 VerifyEvacuation(heap->old_data_space());
243 VerifyEvacuation(heap->code_space());
244 VerifyEvacuation(heap->cell_space());
245 VerifyEvacuation(heap->property_cell_space());
246 VerifyEvacuation(heap->map_space());
247 VerifyEvacuation(heap->new_space());
249 VerifyEvacuationVisitor visitor;
250 heap->IterateStrongRoots(&visitor, VISIT_ALL);
252 #endif // VERIFY_HEAP
256 class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
258 VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
260 void VisitPointers(Object** start, Object** end) {
261 for (Object** current = start; current < end; current++) {
262 if ((*current)->IsHeapObject()) {
263 HeapObject* object = HeapObject::cast(*current);
264 if (object->IsString()) continue;
265 switch (object->map()->instance_type()) {
266 case JS_FUNCTION_TYPE:
267 CheckContext(JSFunction::cast(object)->context());
269 case JS_GLOBAL_PROXY_TYPE:
270 CheckContext(JSGlobalProxy::cast(object)->native_context());
272 case JS_GLOBAL_OBJECT_TYPE:
273 case JS_BUILTINS_OBJECT_TYPE:
274 CheckContext(GlobalObject::cast(object)->native_context());
280 VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
283 VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
284 VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
286 case FIXED_ARRAY_TYPE:
287 if (object->IsContext()) {
288 CheckContext(object);
290 FixedArray* array = FixedArray::cast(object);
291 int length = array->length();
292 // Set array length to zero to prevent cycles while iterating
293 // over array bodies, this is easier than intrusive marking.
294 array->set_length(0);
296 FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
297 array->set_length(length);
303 case TYPE_FEEDBACK_INFO_TYPE:
304 object->Iterate(this);
306 case DECLARED_ACCESSOR_INFO_TYPE:
307 case EXECUTABLE_ACCESSOR_INFO_TYPE:
308 case BYTE_ARRAY_TYPE:
309 case CALL_HANDLER_INFO_TYPE:
311 case FIXED_DOUBLE_ARRAY_TYPE:
312 case HEAP_NUMBER_TYPE:
315 case INTERCEPTOR_INFO_TYPE:
318 case SHARED_FUNCTION_INFO_TYPE:
328 void CheckContext(Object* context) {
329 if (!context->IsContext()) return;
330 Context* native_context = Context::cast(context)->native_context();
331 if (current_native_context_ == NULL) {
332 current_native_context_ = native_context;
334 CHECK_EQ(current_native_context_, native_context);
338 Context* current_native_context_;
342 static void VerifyNativeContextSeparation(Heap* heap) {
343 HeapObjectIterator it(heap->code_space());
345 for (Object* object = it.Next(); object != NULL; object = it.Next()) {
346 VerifyNativeContextSeparationVisitor visitor;
347 Code::cast(object)->CodeIterateBody(&visitor);
353 void MarkCompactCollector::SetUp() {
354 free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
355 free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
359 void MarkCompactCollector::TearDown() {
364 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
365 p->MarkEvacuationCandidate();
366 evacuation_candidates_.Add(p);
370 static void TraceFragmentation(PagedSpace* space) {
371 int number_of_pages = space->CountTotalPages();
372 intptr_t reserved = (number_of_pages * space->AreaSize());
373 intptr_t free = reserved - space->SizeOfObjects();
374 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
375 AllocationSpaceName(space->identity()),
377 static_cast<int>(free),
378 static_cast<double>(free) * 100 / reserved);
382 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
384 ASSERT(evacuation_candidates_.length() == 0);
386 #ifdef ENABLE_GDB_JIT_INTERFACE
387 // If GDBJIT interface is active disable compaction.
388 if (FLAG_gdbjit) return false;
391 CollectEvacuationCandidates(heap()->old_pointer_space());
392 CollectEvacuationCandidates(heap()->old_data_space());
394 if (FLAG_compact_code_space &&
395 (mode == NON_INCREMENTAL_COMPACTION ||
396 FLAG_incremental_code_compaction)) {
397 CollectEvacuationCandidates(heap()->code_space());
398 } else if (FLAG_trace_fragmentation) {
399 TraceFragmentation(heap()->code_space());
402 if (FLAG_trace_fragmentation) {
403 TraceFragmentation(heap()->map_space());
404 TraceFragmentation(heap()->cell_space());
405 TraceFragmentation(heap()->property_cell_space());
408 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
409 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
410 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
412 compacting_ = evacuation_candidates_.length() > 0;
419 void MarkCompactCollector::CollectGarbage() {
420 // Make sure that Prepare() has been called. The individual steps below will
421 // update the state as they proceed.
422 ASSERT(state_ == PREPARE_GC);
423 ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
426 ASSERT(heap_->incremental_marking()->IsStopped());
428 if (FLAG_collect_maps) ClearNonLiveReferences();
430 ClearWeakCollections();
433 if (FLAG_verify_heap) {
434 VerifyMarking(heap_);
440 if (!FLAG_collect_maps) ReattachInitialMaps();
443 if (FLAG_verify_native_context_separation) {
444 VerifyNativeContextSeparation(heap_);
449 if (heap()->weak_embedded_objects_verification_enabled()) {
450 VerifyWeakEmbeddedObjectsInOptimizedCode();
452 if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
453 VerifyOmittedMapChecks();
459 if (marking_parity_ == EVEN_MARKING_PARITY) {
460 marking_parity_ = ODD_MARKING_PARITY;
462 ASSERT(marking_parity_ == ODD_MARKING_PARITY);
463 marking_parity_ = EVEN_MARKING_PARITY;
471 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
472 PageIterator it(space);
474 while (it.has_next()) {
476 CHECK(p->markbits()->IsClean());
477 CHECK_EQ(0, p->LiveBytes());
482 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
483 NewSpacePageIterator it(space->bottom(), space->top());
485 while (it.has_next()) {
486 NewSpacePage* p = it.next();
487 CHECK(p->markbits()->IsClean());
488 CHECK_EQ(0, p->LiveBytes());
493 void MarkCompactCollector::VerifyMarkbitsAreClean() {
494 VerifyMarkbitsAreClean(heap_->old_pointer_space());
495 VerifyMarkbitsAreClean(heap_->old_data_space());
496 VerifyMarkbitsAreClean(heap_->code_space());
497 VerifyMarkbitsAreClean(heap_->cell_space());
498 VerifyMarkbitsAreClean(heap_->property_cell_space());
499 VerifyMarkbitsAreClean(heap_->map_space());
500 VerifyMarkbitsAreClean(heap_->new_space());
502 LargeObjectIterator it(heap_->lo_space());
503 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
504 MarkBit mark_bit = Marking::MarkBitFrom(obj);
505 CHECK(Marking::IsWhite(mark_bit));
506 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
511 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
512 HeapObjectIterator code_iterator(heap()->code_space());
513 for (HeapObject* obj = code_iterator.Next();
515 obj = code_iterator.Next()) {
516 Code* code = Code::cast(obj);
517 if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
518 if (WillBeDeoptimized(code)) continue;
519 code->VerifyEmbeddedObjectsDependency();
524 void MarkCompactCollector::VerifyOmittedMapChecks() {
525 HeapObjectIterator iterator(heap()->map_space());
526 for (HeapObject* obj = iterator.Next();
528 obj = iterator.Next()) {
529 Map* map = Map::cast(obj);
530 map->VerifyOmittedMapChecks();
533 #endif // VERIFY_HEAP
536 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
537 PageIterator it(space);
539 while (it.has_next()) {
540 Bitmap::Clear(it.next());
545 static void ClearMarkbitsInNewSpace(NewSpace* space) {
546 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
548 while (it.has_next()) {
549 Bitmap::Clear(it.next());
554 void MarkCompactCollector::ClearMarkbits() {
555 ClearMarkbitsInPagedSpace(heap_->code_space());
556 ClearMarkbitsInPagedSpace(heap_->map_space());
557 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
558 ClearMarkbitsInPagedSpace(heap_->old_data_space());
559 ClearMarkbitsInPagedSpace(heap_->cell_space());
560 ClearMarkbitsInPagedSpace(heap_->property_cell_space());
561 ClearMarkbitsInNewSpace(heap_->new_space());
563 LargeObjectIterator it(heap_->lo_space());
564 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
565 MarkBit mark_bit = Marking::MarkBitFrom(obj);
567 mark_bit.Next().Clear();
568 Page::FromAddress(obj->address())->ResetProgressBar();
569 Page::FromAddress(obj->address())->ResetLiveBytes();
574 void MarkCompactCollector::StartSweeperThreads() {
575 // TODO(hpayer): This check is just used for debugging purpose and
576 // should be removed or turned into an assert after investigating the
577 // crash in concurrent sweeping.
578 CHECK(free_list_old_pointer_space_.get()->IsEmpty());
579 CHECK(free_list_old_data_space_.get()->IsEmpty());
580 sweeping_pending_ = true;
581 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
582 isolate()->sweeper_threads()[i]->StartSweeping();
587 void MarkCompactCollector::WaitUntilSweepingCompleted() {
588 ASSERT(sweeping_pending_ == true);
589 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
590 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
592 sweeping_pending_ = false;
593 RefillFreeLists(heap()->paged_space(OLD_DATA_SPACE));
594 RefillFreeLists(heap()->paged_space(OLD_POINTER_SPACE));
595 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
596 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
600 intptr_t MarkCompactCollector::RefillFreeLists(PagedSpace* space) {
603 if (space == heap()->old_pointer_space()) {
604 free_list = free_list_old_pointer_space_.get();
605 } else if (space == heap()->old_data_space()) {
606 free_list = free_list_old_data_space_.get();
608 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
609 // to only refill them for old data and pointer spaces.
613 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
614 space->AddToAccountingStats(freed_bytes);
615 space->DecrementUnsweptFreeBytes(freed_bytes);
620 bool MarkCompactCollector::AreSweeperThreadsActivated() {
621 return isolate()->sweeper_threads() != NULL;
625 bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
626 return sweeping_pending_;
630 bool Marking::TransferMark(Address old_start, Address new_start) {
631 // This is only used when resizing an object.
632 ASSERT(MemoryChunk::FromAddress(old_start) ==
633 MemoryChunk::FromAddress(new_start));
635 // If the mark doesn't move, we don't check the color of the object.
636 // It doesn't matter whether the object is black, since it hasn't changed
637 // size, so the adjustment to the live data count will be zero anyway.
638 if (old_start == new_start) return false;
640 MarkBit new_mark_bit = MarkBitFrom(new_start);
641 MarkBit old_mark_bit = MarkBitFrom(old_start);
644 ObjectColor old_color = Color(old_mark_bit);
647 if (Marking::IsBlack(old_mark_bit)) {
648 old_mark_bit.Clear();
649 ASSERT(IsWhite(old_mark_bit));
650 Marking::MarkBlack(new_mark_bit);
652 } else if (Marking::IsGrey(old_mark_bit)) {
653 ASSERT(heap_->incremental_marking()->IsMarking());
654 old_mark_bit.Clear();
655 old_mark_bit.Next().Clear();
656 ASSERT(IsWhite(old_mark_bit));
657 heap_->incremental_marking()->WhiteToGreyAndPush(
658 HeapObject::FromAddress(new_start), new_mark_bit);
659 heap_->incremental_marking()->RestartIfNotMarking();
663 ObjectColor new_color = Color(new_mark_bit);
664 ASSERT(new_color == old_color);
671 const char* AllocationSpaceName(AllocationSpace space) {
673 case NEW_SPACE: return "NEW_SPACE";
674 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
675 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
676 case CODE_SPACE: return "CODE_SPACE";
677 case MAP_SPACE: return "MAP_SPACE";
678 case CELL_SPACE: return "CELL_SPACE";
679 case PROPERTY_CELL_SPACE:
680 return "PROPERTY_CELL_SPACE";
681 case LO_SPACE: return "LO_SPACE";
690 // Returns zero for pages that have so little fragmentation that it is not
691 // worth defragmenting them. Otherwise a positive integer that gives an
692 // estimate of fragmentation on an arbitrary scale.
693 static int FreeListFragmentation(PagedSpace* space, Page* p) {
694 // If page was not swept then there are no free list items on it.
695 if (!p->WasSwept()) {
696 if (FLAG_trace_fragmentation) {
697 PrintF("%p [%s]: %d bytes live (unswept)\n",
698 reinterpret_cast<void*>(p),
699 AllocationSpaceName(space->identity()),
705 PagedSpace::SizeStats sizes;
706 space->ObtainFreeListStatistics(p, &sizes);
709 intptr_t ratio_threshold;
710 intptr_t area_size = space->AreaSize();
711 if (space->identity() == CODE_SPACE) {
712 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
714 ratio_threshold = 10;
716 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
718 ratio_threshold = 15;
721 if (FLAG_trace_fragmentation) {
722 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
723 reinterpret_cast<void*>(p),
724 AllocationSpaceName(space->identity()),
725 static_cast<int>(sizes.small_size_),
726 static_cast<double>(sizes.small_size_ * 100) /
728 static_cast<int>(sizes.medium_size_),
729 static_cast<double>(sizes.medium_size_ * 100) /
731 static_cast<int>(sizes.large_size_),
732 static_cast<double>(sizes.large_size_ * 100) /
734 static_cast<int>(sizes.huge_size_),
735 static_cast<double>(sizes.huge_size_ * 100) /
737 (ratio > ratio_threshold) ? "[fragmented]" : "");
740 if (FLAG_always_compact && sizes.Total() != area_size) {
744 if (ratio <= ratio_threshold) return 0; // Not fragmented.
746 return static_cast<int>(ratio - ratio_threshold);
750 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
751 ASSERT(space->identity() == OLD_POINTER_SPACE ||
752 space->identity() == OLD_DATA_SPACE ||
753 space->identity() == CODE_SPACE);
755 static const int kMaxMaxEvacuationCandidates = 1000;
756 int number_of_pages = space->CountTotalPages();
757 int max_evacuation_candidates =
758 static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
760 if (FLAG_stress_compaction || FLAG_always_compact) {
761 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
766 Candidate() : fragmentation_(0), page_(NULL) { }
767 Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
769 int fragmentation() { return fragmentation_; }
770 Page* page() { return page_; }
777 enum CompactionMode {
779 REDUCE_MEMORY_FOOTPRINT
782 CompactionMode mode = COMPACT_FREE_LISTS;
784 intptr_t reserved = number_of_pages * space->AreaSize();
785 intptr_t over_reserved = reserved - space->SizeOfObjects();
786 static const intptr_t kFreenessThreshold = 50;
788 if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
789 // If reduction of memory footprint was requested, we are aggressive
790 // about choosing pages to free. We expect that half-empty pages
791 // are easier to compact so slightly bump the limit.
792 mode = REDUCE_MEMORY_FOOTPRINT;
793 max_evacuation_candidates += 2;
797 if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
798 // If over-usage is very high (more than a third of the space), we
799 // try to free all mostly empty pages. We expect that almost empty
800 // pages are even easier to compact so bump the limit even more.
801 mode = REDUCE_MEMORY_FOOTPRINT;
802 max_evacuation_candidates *= 2;
805 if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
806 PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
807 "evacuation candidate limit: %d\n",
808 static_cast<double>(over_reserved) / MB,
809 static_cast<double>(reserved) / MB,
810 static_cast<int>(kFreenessThreshold),
811 max_evacuation_candidates);
814 intptr_t estimated_release = 0;
816 Candidate candidates[kMaxMaxEvacuationCandidates];
818 max_evacuation_candidates =
819 Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
822 int fragmentation = 0;
823 Candidate* least = NULL;
825 PageIterator it(space);
826 if (it.has_next()) it.next(); // Never compact the first page.
828 while (it.has_next()) {
830 p->ClearEvacuationCandidate();
832 if (FLAG_stress_compaction) {
833 unsigned int counter = space->heap()->ms_count();
834 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
835 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
836 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
837 // Don't try to release too many pages.
838 if (estimated_release >= over_reserved) {
842 intptr_t free_bytes = 0;
844 if (!p->WasSwept()) {
845 free_bytes = (p->area_size() - p->LiveBytes());
847 PagedSpace::SizeStats sizes;
848 space->ObtainFreeListStatistics(p, &sizes);
849 free_bytes = sizes.Total();
852 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
854 if (free_pct >= kFreenessThreshold) {
855 estimated_release += free_bytes;
856 fragmentation = free_pct;
861 if (FLAG_trace_fragmentation) {
862 PrintF("%p [%s]: %d (%.2f%%) free %s\n",
863 reinterpret_cast<void*>(p),
864 AllocationSpaceName(space->identity()),
865 static_cast<int>(free_bytes),
866 static_cast<double>(free_bytes * 100) / p->area_size(),
867 (fragmentation > 0) ? "[fragmented]" : "");
870 fragmentation = FreeListFragmentation(space, p);
873 if (fragmentation != 0) {
874 if (count < max_evacuation_candidates) {
875 candidates[count++] = Candidate(fragmentation, p);
878 for (int i = 0; i < max_evacuation_candidates; i++) {
880 candidates[i].fragmentation() < least->fragmentation()) {
881 least = candidates + i;
885 if (least->fragmentation() < fragmentation) {
886 *least = Candidate(fragmentation, p);
893 for (int i = 0; i < count; i++) {
894 AddEvacuationCandidate(candidates[i].page());
897 if (count > 0 && FLAG_trace_fragmentation) {
898 PrintF("Collected %d evacuation candidates for space %s\n",
900 AllocationSpaceName(space->identity()));
905 void MarkCompactCollector::AbortCompaction() {
907 int npages = evacuation_candidates_.length();
908 for (int i = 0; i < npages; i++) {
909 Page* p = evacuation_candidates_[i];
910 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
911 p->ClearEvacuationCandidate();
912 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
915 evacuation_candidates_.Rewind(0);
916 invalidated_code_.Rewind(0);
918 ASSERT_EQ(0, evacuation_candidates_.length());
922 void MarkCompactCollector::Prepare(GCTracer* tracer) {
923 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
925 // Rather than passing the tracer around we stash it in a static member
930 ASSERT(state_ == IDLE);
934 ASSERT(!FLAG_never_compact || !FLAG_always_compact);
936 if (IsConcurrentSweepingInProgress()) {
937 // Instead of waiting we could also abort the sweeper threads here.
938 WaitUntilSweepingCompleted();
941 // Clear marking bits if incremental marking is aborted.
942 if (was_marked_incrementally_ && abort_incremental_marking_) {
943 heap()->incremental_marking()->Abort();
946 was_marked_incrementally_ = false;
949 // Don't start compaction if we are in the middle of incremental
950 // marking cycle. We did not collect any slots.
951 if (!FLAG_never_compact && !was_marked_incrementally_) {
952 StartCompaction(NON_INCREMENTAL_COMPACTION);
955 PagedSpaces spaces(heap());
956 for (PagedSpace* space = spaces.next();
958 space = spaces.next()) {
959 space->PrepareForMarkCompact();
963 if (!was_marked_incrementally_ && FLAG_verify_heap) {
964 VerifyMarkbitsAreClean();
970 void MarkCompactCollector::Finish() {
972 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
975 // The stub cache is not traversed during GC; clear the cache to
976 // force lazy re-initialization of it. This must be done after the
977 // GC, because it relies on the new address of certain old space
978 // objects (empty string, illegal builtin).
979 isolate()->stub_cache()->Clear();
981 if (have_code_to_deoptimize_) {
982 // Some code objects were marked for deoptimization during the GC.
983 Deoptimizer::DeoptimizeMarkedCode(isolate());
984 have_code_to_deoptimize_ = false;
989 // -------------------------------------------------------------------------
990 // Phase 1: tracing and marking live objects.
991 // before: all objects are in normal state.
992 // after: a live object's map pointer is marked as '00'.
994 // Marking all live objects in the heap as part of mark-sweep or mark-compact
995 // collection. Before marking, all objects are in their normal state. After
996 // marking, live objects' map pointers are marked indicating that the object
997 // has been found reachable.
999 // The marking algorithm is a (mostly) depth-first (because of possible stack
1000 // overflow) traversal of the graph of objects reachable from the roots. It
1001 // uses an explicit stack of pointers rather than recursion. The young
1002 // generation's inactive ('from') space is used as a marking stack. The
1003 // objects in the marking stack are the ones that have been reached and marked
1004 // but their children have not yet been visited.
1006 // The marking stack can overflow during traversal. In that case, we set an
1007 // overflow flag. When the overflow flag is set, we continue marking objects
1008 // reachable from the objects on the marking stack, but no longer push them on
1009 // the marking stack. Instead, we mark them as both marked and overflowed.
1010 // When the stack is in the overflowed state, objects marked as overflowed
1011 // have been reached and marked but their children have not been visited yet.
1012 // After emptying the marking stack, we clear the overflow flag and traverse
1013 // the heap looking for objects marked as overflowed, push them on the stack,
1014 // and continue with marking. This process repeats until all reachable
1015 // objects have been marked.
1017 void CodeFlusher::ProcessJSFunctionCandidates() {
1018 Code* lazy_compile =
1019 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1020 Object* undefined = isolate_->heap()->undefined_value();
1022 JSFunction* candidate = jsfunction_candidates_head_;
1023 JSFunction* next_candidate;
1024 while (candidate != NULL) {
1025 next_candidate = GetNextCandidate(candidate);
1026 ClearNextCandidate(candidate, undefined);
1028 SharedFunctionInfo* shared = candidate->shared();
1030 Code* code = shared->code();
1031 MarkBit code_mark = Marking::MarkBitFrom(code);
1032 if (!code_mark.Get()) {
1033 if (FLAG_trace_code_flushing && shared->is_compiled()) {
1034 PrintF("[code-flushing clears: ");
1035 shared->ShortPrint();
1036 PrintF(" - age: %d]\n", code->GetAge());
1038 shared->set_code(lazy_compile);
1039 candidate->set_code(lazy_compile);
1041 candidate->set_code(code);
1044 // We are in the middle of a GC cycle so the write barrier in the code
1045 // setter did not record the slot update and we have to do that manually.
1046 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
1047 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
1048 isolate_->heap()->mark_compact_collector()->
1049 RecordCodeEntrySlot(slot, target);
1051 Object** shared_code_slot =
1052 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1053 isolate_->heap()->mark_compact_collector()->
1054 RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
1056 candidate = next_candidate;
1059 jsfunction_candidates_head_ = NULL;
1063 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1064 Code* lazy_compile =
1065 isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
1067 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1068 SharedFunctionInfo* next_candidate;
1069 while (candidate != NULL) {
1070 next_candidate = GetNextCandidate(candidate);
1071 ClearNextCandidate(candidate);
1073 Code* code = candidate->code();
1074 MarkBit code_mark = Marking::MarkBitFrom(code);
1075 if (!code_mark.Get()) {
1076 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1077 PrintF("[code-flushing clears: ");
1078 candidate->ShortPrint();
1079 PrintF(" - age: %d]\n", code->GetAge());
1081 candidate->set_code(lazy_compile);
1084 Object** code_slot =
1085 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1086 isolate_->heap()->mark_compact_collector()->
1087 RecordSlot(code_slot, code_slot, *code_slot);
1089 candidate = next_candidate;
1092 shared_function_info_candidates_head_ = NULL;
1096 void CodeFlusher::ProcessOptimizedCodeMaps() {
1097 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1099 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1100 SharedFunctionInfo* next_holder;
1102 while (holder != NULL) {
1103 next_holder = GetNextCodeMap(holder);
1104 ClearNextCodeMap(holder);
1106 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
1107 int new_length = SharedFunctionInfo::kEntriesStart;
1108 int old_length = code_map->length();
1109 for (int i = SharedFunctionInfo::kEntriesStart;
1111 i += SharedFunctionInfo::kEntryLength) {
1113 Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1114 if (!Marking::MarkBitFrom(code).Get()) continue;
1116 // Move every slot in the entry.
1117 for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
1118 int dst_index = new_length++;
1119 Object** slot = code_map->RawFieldOfElementAt(dst_index);
1120 Object* object = code_map->get(i + j);
1121 code_map->set(dst_index, object);
1122 if (j == SharedFunctionInfo::kOsrAstIdOffset) {
1123 ASSERT(object->IsSmi());
1125 ASSERT(Marking::IsBlack(
1126 Marking::MarkBitFrom(HeapObject::cast(*slot))));
1127 isolate_->heap()->mark_compact_collector()->
1128 RecordSlot(slot, slot, *slot);
1133 // Trim the optimized code map if entries have been removed.
1134 if (new_length < old_length) {
1135 holder->TrimOptimizedCodeMap(old_length - new_length);
1138 holder = next_holder;
1141 optimized_code_map_holder_head_ = NULL;
1145 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1146 // Make sure previous flushing decisions are revisited.
1147 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1149 if (FLAG_trace_code_flushing) {
1150 PrintF("[code-flushing abandons function-info: ");
1151 shared_info->ShortPrint();
1155 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1156 SharedFunctionInfo* next_candidate;
1157 if (candidate == shared_info) {
1158 next_candidate = GetNextCandidate(shared_info);
1159 shared_function_info_candidates_head_ = next_candidate;
1160 ClearNextCandidate(shared_info);
1162 while (candidate != NULL) {
1163 next_candidate = GetNextCandidate(candidate);
1165 if (next_candidate == shared_info) {
1166 next_candidate = GetNextCandidate(shared_info);
1167 SetNextCandidate(candidate, next_candidate);
1168 ClearNextCandidate(shared_info);
1172 candidate = next_candidate;
1178 void CodeFlusher::EvictCandidate(JSFunction* function) {
1179 ASSERT(!function->next_function_link()->IsUndefined());
1180 Object* undefined = isolate_->heap()->undefined_value();
1182 // Make sure previous flushing decisions are revisited.
1183 isolate_->heap()->incremental_marking()->RecordWrites(function);
1184 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1186 if (FLAG_trace_code_flushing) {
1187 PrintF("[code-flushing abandons closure: ");
1188 function->shared()->ShortPrint();
1192 JSFunction* candidate = jsfunction_candidates_head_;
1193 JSFunction* next_candidate;
1194 if (candidate == function) {
1195 next_candidate = GetNextCandidate(function);
1196 jsfunction_candidates_head_ = next_candidate;
1197 ClearNextCandidate(function, undefined);
1199 while (candidate != NULL) {
1200 next_candidate = GetNextCandidate(candidate);
1202 if (next_candidate == function) {
1203 next_candidate = GetNextCandidate(function);
1204 SetNextCandidate(candidate, next_candidate);
1205 ClearNextCandidate(function, undefined);
1209 candidate = next_candidate;
1215 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1216 ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1217 get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1219 // Make sure previous flushing decisions are revisited.
1220 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1222 if (FLAG_trace_code_flushing) {
1223 PrintF("[code-flushing abandons code-map: ");
1224 code_map_holder->ShortPrint();
1228 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1229 SharedFunctionInfo* next_holder;
1230 if (holder == code_map_holder) {
1231 next_holder = GetNextCodeMap(code_map_holder);
1232 optimized_code_map_holder_head_ = next_holder;
1233 ClearNextCodeMap(code_map_holder);
1235 while (holder != NULL) {
1236 next_holder = GetNextCodeMap(holder);
1238 if (next_holder == code_map_holder) {
1239 next_holder = GetNextCodeMap(code_map_holder);
1240 SetNextCodeMap(holder, next_holder);
1241 ClearNextCodeMap(code_map_holder);
1245 holder = next_holder;
1251 void CodeFlusher::EvictJSFunctionCandidates() {
1252 JSFunction* candidate = jsfunction_candidates_head_;
1253 JSFunction* next_candidate;
1254 while (candidate != NULL) {
1255 next_candidate = GetNextCandidate(candidate);
1256 EvictCandidate(candidate);
1257 candidate = next_candidate;
1259 ASSERT(jsfunction_candidates_head_ == NULL);
1263 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1264 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1265 SharedFunctionInfo* next_candidate;
1266 while (candidate != NULL) {
1267 next_candidate = GetNextCandidate(candidate);
1268 EvictCandidate(candidate);
1269 candidate = next_candidate;
1271 ASSERT(shared_function_info_candidates_head_ == NULL);
1275 void CodeFlusher::EvictOptimizedCodeMaps() {
1276 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1277 SharedFunctionInfo* next_holder;
1278 while (holder != NULL) {
1279 next_holder = GetNextCodeMap(holder);
1280 EvictOptimizedCodeMap(holder);
1281 holder = next_holder;
1283 ASSERT(optimized_code_map_holder_head_ == NULL);
1287 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1288 Heap* heap = isolate_->heap();
1290 JSFunction** slot = &jsfunction_candidates_head_;
1291 JSFunction* candidate = jsfunction_candidates_head_;
1292 while (candidate != NULL) {
1293 if (heap->InFromSpace(candidate)) {
1294 v->VisitPointer(reinterpret_cast<Object**>(slot));
1296 candidate = GetNextCandidate(*slot);
1297 slot = GetNextCandidateSlot(*slot);
1302 MarkCompactCollector::~MarkCompactCollector() {
1303 if (code_flusher_ != NULL) {
1304 delete code_flusher_;
1305 code_flusher_ = NULL;
1310 static inline HeapObject* ShortCircuitConsString(Object** p) {
1311 // Optimization: If the heap object pointed to by p is a non-internalized
1312 // cons string whose right substring is HEAP->empty_string, update
1313 // it in place to its left substring. Return the updated value.
1315 // Here we assume that if we change *p, we replace it with a heap object
1316 // (i.e., the left substring of a cons string is always a heap object).
1318 // The check performed is:
1319 // object->IsConsString() && !object->IsInternalizedString() &&
1320 // (ConsString::cast(object)->second() == HEAP->empty_string())
1321 // except the maps for the object and its possible substrings might be
1323 HeapObject* object = HeapObject::cast(*p);
1324 if (!FLAG_clever_optimizations) return object;
1325 Map* map = object->map();
1326 InstanceType type = map->instance_type();
1327 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
1329 Object* second = reinterpret_cast<ConsString*>(object)->second();
1330 Heap* heap = map->GetHeap();
1331 if (second != heap->empty_string()) {
1335 // Since we don't have the object's start, it is impossible to update the
1336 // page dirty marks. Therefore, we only replace the string with its left
1337 // substring when page dirty marks do not change.
1338 Object* first = reinterpret_cast<ConsString*>(object)->first();
1339 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
1342 return HeapObject::cast(first);
1346 class MarkCompactMarkingVisitor
1347 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1349 static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1350 Map* map, HeapObject* obj);
1352 static void ObjectStatsCountFixedArray(
1353 FixedArrayBase* fixed_array,
1354 FixedArraySubInstanceType fast_type,
1355 FixedArraySubInstanceType dictionary_type);
1357 template<MarkCompactMarkingVisitor::VisitorId id>
1358 class ObjectStatsTracker {
1360 static inline void Visit(Map* map, HeapObject* obj);
1363 static void Initialize();
1365 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1366 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1369 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1370 // Mark all objects pointed to in [start, end).
1371 const int kMinRangeForMarkingRecursion = 64;
1372 if (end - start >= kMinRangeForMarkingRecursion) {
1373 if (VisitUnmarkedObjects(heap, start, end)) return;
1374 // We are close to a stack overflow, so just mark the objects.
1376 MarkCompactCollector* collector = heap->mark_compact_collector();
1377 for (Object** p = start; p < end; p++) {
1378 MarkObjectByPointer(collector, start, p);
1382 // Marks the object black and pushes it on the marking stack.
1383 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1384 MarkBit mark = Marking::MarkBitFrom(object);
1385 heap->mark_compact_collector()->MarkObject(object, mark);
1388 // Marks the object black without pushing it on the marking stack.
1389 // Returns true if object needed marking and false otherwise.
1390 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1391 MarkBit mark_bit = Marking::MarkBitFrom(object);
1392 if (!mark_bit.Get()) {
1393 heap->mark_compact_collector()->SetMark(object, mark_bit);
1399 // Mark object pointed to by p.
1400 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1401 Object** anchor_slot,
1403 if (!(*p)->IsHeapObject()) return;
1404 HeapObject* object = ShortCircuitConsString(p);
1405 collector->RecordSlot(anchor_slot, p, object);
1406 MarkBit mark = Marking::MarkBitFrom(object);
1407 collector->MarkObject(object, mark);
1411 // Visit an unmarked object.
1412 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1415 ASSERT(collector->heap()->Contains(obj));
1416 ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1418 Map* map = obj->map();
1419 Heap* heap = obj->GetHeap();
1420 MarkBit mark = Marking::MarkBitFrom(obj);
1421 heap->mark_compact_collector()->SetMark(obj, mark);
1422 // Mark the map pointer and the body.
1423 MarkBit map_mark = Marking::MarkBitFrom(map);
1424 heap->mark_compact_collector()->MarkObject(map, map_mark);
1425 IterateBody(map, obj);
1428 // Visit all unmarked objects pointed to by [start, end).
1429 // Returns false if the operation fails (lack of stack space).
1430 INLINE(static bool VisitUnmarkedObjects(Heap* heap,
1433 // Return false is we are close to the stack limit.
1434 StackLimitCheck check(heap->isolate());
1435 if (check.HasOverflowed()) return false;
1437 MarkCompactCollector* collector = heap->mark_compact_collector();
1438 // Visit the unmarked objects.
1439 for (Object** p = start; p < end; p++) {
1441 if (!o->IsHeapObject()) continue;
1442 collector->RecordSlot(start, p, o);
1443 HeapObject* obj = HeapObject::cast(o);
1444 MarkBit mark = Marking::MarkBitFrom(obj);
1445 if (mark.Get()) continue;
1446 VisitUnmarkedObject(collector, obj);
1451 INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1452 SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1453 shared->BeforeVisitingPointers();
1456 static void VisitWeakCollection(Map* map, HeapObject* object) {
1457 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1458 JSWeakCollection* weak_collection =
1459 reinterpret_cast<JSWeakCollection*>(object);
1461 // Enqueue weak map in linked list of encountered weak maps.
1462 if (weak_collection->next() == Smi::FromInt(0)) {
1463 weak_collection->set_next(collector->encountered_weak_collections());
1464 collector->set_encountered_weak_collections(weak_collection);
1467 // Skip visiting the backing hash table containing the mappings.
1468 int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
1469 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1472 JSWeakCollection::BodyDescriptor::kStartOffset,
1473 JSWeakCollection::kTableOffset);
1474 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
1477 JSWeakCollection::kTableOffset + kPointerSize,
1480 // Mark the backing hash table without pushing it on the marking stack.
1481 Object* table_object = weak_collection->table();
1482 if (!table_object->IsHashTable()) return;
1483 WeakHashTable* table = WeakHashTable::cast(table_object);
1484 Object** table_slot =
1485 HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
1486 MarkBit table_mark = Marking::MarkBitFrom(table);
1487 collector->RecordSlot(table_slot, table_slot, table);
1488 if (!table_mark.Get()) collector->SetMark(table, table_mark);
1489 // Recording the map slot can be skipped, because maps are not compacted.
1490 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1491 ASSERT(MarkCompactCollector::IsMarked(table->map()));
1496 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1498 // Code flushing support.
1500 static const int kRegExpCodeThreshold = 5;
1502 static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1505 // Make sure that the fixed array is in fact initialized on the RegExp.
1506 // We could potentially trigger a GC when initializing the RegExp.
1507 if (HeapObject::cast(re->data())->map()->instance_type() !=
1508 FIXED_ARRAY_TYPE) return;
1510 // Make sure this is a RegExp that actually contains code.
1511 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1513 Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
1514 if (!code->IsSmi() &&
1515 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1516 // Save a copy that can be reinstated if we need the code again.
1517 re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
1519 // Saving a copy might create a pointer into compaction candidate
1520 // that was not observed by marker. This might happen if JSRegExp data
1521 // was marked through the compilation cache before marker reached JSRegExp
1523 FixedArray* data = FixedArray::cast(re->data());
1524 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1525 heap->mark_compact_collector()->
1526 RecordSlot(slot, slot, code);
1528 // Set a number in the 0-255 range to guarantee no smi overflow.
1529 re->SetDataAt(JSRegExp::code_index(is_ascii),
1530 Smi::FromInt(heap->sweep_generation() & 0xff));
1531 } else if (code->IsSmi()) {
1532 int value = Smi::cast(code)->value();
1533 // The regexp has not been compiled yet or there was a compilation error.
1534 if (value == JSRegExp::kUninitializedValue ||
1535 value == JSRegExp::kCompilationErrorValue) {
1539 // Check if we should flush now.
1540 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1541 re->SetDataAt(JSRegExp::code_index(is_ascii),
1542 Smi::FromInt(JSRegExp::kUninitializedValue));
1543 re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1544 Smi::FromInt(JSRegExp::kUninitializedValue));
1550 // Works by setting the current sweep_generation (as a smi) in the
1551 // code object place in the data array of the RegExp and keeps a copy
1552 // around that can be reinstated if we reuse the RegExp before flushing.
1553 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1554 // we flush the code.
1555 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1556 Heap* heap = map->GetHeap();
1557 MarkCompactCollector* collector = heap->mark_compact_collector();
1558 if (!collector->is_code_flushing_enabled()) {
1559 VisitJSRegExp(map, object);
1562 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1563 // Flush code or set age on both ASCII and two byte code.
1564 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1565 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1566 // Visit the fields of the RegExp, including the updated FixedArray.
1567 VisitJSRegExp(map, object);
1570 static VisitorDispatchTable<Callback> non_count_table_;
1574 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
1575 FixedArrayBase* fixed_array,
1576 FixedArraySubInstanceType fast_type,
1577 FixedArraySubInstanceType dictionary_type) {
1578 Heap* heap = fixed_array->map()->GetHeap();
1579 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1580 fixed_array->map() != heap->fixed_double_array_map() &&
1581 fixed_array != heap->empty_fixed_array()) {
1582 if (fixed_array->IsDictionary()) {
1583 heap->RecordFixedArraySubTypeStats(dictionary_type,
1584 fixed_array->Size());
1586 heap->RecordFixedArraySubTypeStats(fast_type,
1587 fixed_array->Size());
1593 void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1594 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
1595 Heap* heap = map->GetHeap();
1596 int object_size = obj->Size();
1597 heap->RecordObjectStats(map->instance_type(), object_size);
1598 non_count_table_.GetVisitorById(id)(map, obj);
1599 if (obj->IsJSObject()) {
1600 JSObject* object = JSObject::cast(obj);
1601 ObjectStatsCountFixedArray(object->elements(),
1602 DICTIONARY_ELEMENTS_SUB_TYPE,
1603 FAST_ELEMENTS_SUB_TYPE);
1604 ObjectStatsCountFixedArray(object->properties(),
1605 DICTIONARY_PROPERTIES_SUB_TYPE,
1606 FAST_PROPERTIES_SUB_TYPE);
1611 template<MarkCompactMarkingVisitor::VisitorId id>
1612 void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
1613 Map* map, HeapObject* obj) {
1614 ObjectStatsVisitBase(id, map, obj);
1619 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1620 MarkCompactMarkingVisitor::kVisitMap> {
1622 static inline void Visit(Map* map, HeapObject* obj) {
1623 Heap* heap = map->GetHeap();
1624 Map* map_obj = Map::cast(obj);
1625 ASSERT(map->instance_type() == MAP_TYPE);
1626 DescriptorArray* array = map_obj->instance_descriptors();
1627 if (map_obj->owns_descriptors() &&
1628 array != heap->empty_descriptor_array()) {
1629 int fixed_array_size = array->Size();
1630 heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
1633 if (map_obj->HasTransitionArray()) {
1634 int fixed_array_size = map_obj->transitions()->Size();
1635 heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
1638 if (map_obj->has_code_cache()) {
1639 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
1640 heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
1641 cache->default_cache()->Size());
1642 if (!cache->normal_type_cache()->IsUndefined()) {
1643 heap->RecordFixedArraySubTypeStats(
1644 MAP_CODE_CACHE_SUB_TYPE,
1645 FixedArray::cast(cache->normal_type_cache())->Size());
1648 ObjectStatsVisitBase(kVisitMap, map, obj);
1654 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1655 MarkCompactMarkingVisitor::kVisitCode> {
1657 static inline void Visit(Map* map, HeapObject* obj) {
1658 Heap* heap = map->GetHeap();
1659 int object_size = obj->Size();
1660 ASSERT(map->instance_type() == CODE_TYPE);
1661 Code* code_obj = Code::cast(obj);
1662 heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
1664 ObjectStatsVisitBase(kVisitCode, map, obj);
1670 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1671 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
1673 static inline void Visit(Map* map, HeapObject* obj) {
1674 Heap* heap = map->GetHeap();
1675 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1676 if (sfi->scope_info() != heap->empty_fixed_array()) {
1677 heap->RecordFixedArraySubTypeStats(
1678 SCOPE_INFO_SUB_TYPE,
1679 FixedArray::cast(sfi->scope_info())->Size());
1681 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1687 class MarkCompactMarkingVisitor::ObjectStatsTracker<
1688 MarkCompactMarkingVisitor::kVisitFixedArray> {
1690 static inline void Visit(Map* map, HeapObject* obj) {
1691 Heap* heap = map->GetHeap();
1692 FixedArray* fixed_array = FixedArray::cast(obj);
1693 if (fixed_array == heap->string_table()) {
1694 heap->RecordFixedArraySubTypeStats(
1695 STRING_TABLE_SUB_TYPE,
1696 fixed_array->Size());
1698 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
1703 void MarkCompactMarkingVisitor::Initialize() {
1704 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1706 table_.Register(kVisitJSRegExp,
1707 &VisitRegExpAndFlushCode);
1709 if (FLAG_track_gc_object_stats) {
1710 // Copy the visitor table to make call-through possible.
1711 non_count_table_.CopyFrom(&table_);
1712 #define VISITOR_ID_COUNT_FUNCTION(id) \
1713 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1714 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1715 #undef VISITOR_ID_COUNT_FUNCTION
1720 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1721 MarkCompactMarkingVisitor::non_count_table_;
1724 class CodeMarkingVisitor : public ThreadVisitor {
1726 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1727 : collector_(collector) {}
1729 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1730 collector_->PrepareThreadForCodeFlushing(isolate, top);
1734 MarkCompactCollector* collector_;
1738 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1740 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1741 : collector_(collector) {}
1743 void VisitPointers(Object** start, Object** end) {
1744 for (Object** p = start; p < end; p++) VisitPointer(p);
1747 void VisitPointer(Object** slot) {
1748 Object* obj = *slot;
1749 if (obj->IsSharedFunctionInfo()) {
1750 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1751 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1752 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1753 collector_->MarkObject(shared->code(), code_mark);
1754 collector_->MarkObject(shared, shared_mark);
1759 MarkCompactCollector* collector_;
1763 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1764 ThreadLocalTop* top) {
1765 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1766 // Note: for the frame that has a pending lazy deoptimization
1767 // StackFrame::unchecked_code will return a non-optimized code object for
1768 // the outermost function and StackFrame::LookupCode will return
1769 // actual optimized code object.
1770 StackFrame* frame = it.frame();
1771 Code* code = frame->unchecked_code();
1772 MarkBit code_mark = Marking::MarkBitFrom(code);
1773 MarkObject(code, code_mark);
1774 if (frame->is_optimized()) {
1775 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1776 frame->LookupCode());
1782 void MarkCompactCollector::PrepareForCodeFlushing() {
1783 // Enable code flushing for non-incremental cycles.
1784 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1785 EnableCodeFlushing(!was_marked_incrementally_);
1788 // If code flushing is disabled, there is no need to prepare for it.
1789 if (!is_code_flushing_enabled()) return;
1791 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1792 // relies on it being marked before any other descriptor array.
1793 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1794 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1795 MarkObject(descriptor_array, descriptor_array_mark);
1797 // Make sure we are not referencing the code from the stack.
1798 ASSERT(this == heap()->mark_compact_collector());
1799 PrepareThreadForCodeFlushing(heap()->isolate(),
1800 heap()->isolate()->thread_local_top());
1802 // Iterate the archived stacks in all threads to check if
1803 // the code is referenced.
1804 CodeMarkingVisitor code_marking_visitor(this);
1805 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1806 &code_marking_visitor);
1808 SharedFunctionInfoMarkingVisitor visitor(this);
1809 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1810 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1812 ProcessMarkingDeque();
1816 // Visitor class for marking heap roots.
1817 class RootMarkingVisitor : public ObjectVisitor {
1819 explicit RootMarkingVisitor(Heap* heap)
1820 : collector_(heap->mark_compact_collector()) { }
1822 void VisitPointer(Object** p) {
1823 MarkObjectByPointer(p);
1826 void VisitPointers(Object** start, Object** end) {
1827 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1831 void MarkObjectByPointer(Object** p) {
1832 if (!(*p)->IsHeapObject()) return;
1834 // Replace flat cons strings in place.
1835 HeapObject* object = ShortCircuitConsString(p);
1836 MarkBit mark_bit = Marking::MarkBitFrom(object);
1837 if (mark_bit.Get()) return;
1839 Map* map = object->map();
1841 collector_->SetMark(object, mark_bit);
1843 // Mark the map pointer and body, and push them on the marking stack.
1844 MarkBit map_mark = Marking::MarkBitFrom(map);
1845 collector_->MarkObject(map, map_mark);
1846 MarkCompactMarkingVisitor::IterateBody(map, object);
1848 // Mark all the objects reachable from the map and body. May leave
1849 // overflowed objects in the heap.
1850 collector_->EmptyMarkingDeque();
1853 MarkCompactCollector* collector_;
1857 // Helper class for pruning the string table.
1858 template<bool finalize_external_strings>
1859 class StringTableCleaner : public ObjectVisitor {
1861 explicit StringTableCleaner(Heap* heap)
1862 : heap_(heap), pointers_removed_(0) { }
1864 virtual void VisitPointers(Object** start, Object** end) {
1865 // Visit all HeapObject pointers in [start, end).
1866 for (Object** p = start; p < end; p++) {
1868 if (o->IsHeapObject() &&
1869 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1870 if (finalize_external_strings) {
1871 ASSERT(o->IsExternalString());
1872 heap_->FinalizeExternalString(String::cast(*p));
1874 pointers_removed_++;
1876 // Set the entry to the_hole_value (as deleted).
1877 *p = heap_->the_hole_value();
1882 int PointersRemoved() {
1883 ASSERT(!finalize_external_strings);
1884 return pointers_removed_;
1889 int pointers_removed_;
1893 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1894 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1897 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1899 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1901 virtual Object* RetainAs(Object* object) {
1902 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1904 } else if (object->IsAllocationSite() &&
1905 !(AllocationSite::cast(object)->IsZombie())) {
1906 // "dead" AllocationSites need to live long enough for a traversal of new
1907 // space. These sites get a one-time reprieve.
1908 AllocationSite* site = AllocationSite::cast(object);
1910 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1919 // Fill the marking stack with overflowed objects returned by the given
1920 // iterator. Stop when the marking stack is filled or the end of the space
1921 // is reached, whichever comes first.
1923 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1924 MarkingDeque* marking_deque,
1926 // The caller should ensure that the marking stack is initially not full,
1927 // so that we don't waste effort pointlessly scanning for objects.
1928 ASSERT(!marking_deque->IsFull());
1930 Map* filler_map = heap->one_pointer_filler_map();
1931 for (HeapObject* object = it->Next();
1933 object = it->Next()) {
1934 MarkBit markbit = Marking::MarkBitFrom(object);
1935 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1936 Marking::GreyToBlack(markbit);
1937 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1938 marking_deque->PushBlack(object);
1939 if (marking_deque->IsFull()) return;
1945 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1948 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1950 ASSERT(!marking_deque->IsFull());
1951 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1952 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1953 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1954 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1956 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1957 Address cell_base = it.CurrentCellBase();
1958 MarkBit::CellType* cell = it.CurrentCell();
1960 const MarkBit::CellType current_cell = *cell;
1961 if (current_cell == 0) continue;
1963 MarkBit::CellType grey_objects;
1965 const MarkBit::CellType next_cell = *(cell+1);
1966 grey_objects = current_cell &
1967 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1969 grey_objects = current_cell & (current_cell >> 1);
1973 while (grey_objects != 0) {
1974 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1975 grey_objects >>= trailing_zeros;
1976 offset += trailing_zeros;
1977 MarkBit markbit(cell, 1 << offset, false);
1978 ASSERT(Marking::IsGrey(markbit));
1979 Marking::GreyToBlack(markbit);
1980 Address addr = cell_base + offset * kPointerSize;
1981 HeapObject* object = HeapObject::FromAddress(addr);
1982 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1983 marking_deque->PushBlack(object);
1984 if (marking_deque->IsFull()) return;
1989 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1994 int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
1995 NewSpace* new_space,
1997 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1998 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1999 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
2000 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
2002 MarkBit::CellType* cells = p->markbits()->cells();
2003 int survivors_size = 0;
2005 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
2006 Address cell_base = it.CurrentCellBase();
2007 MarkBit::CellType* cell = it.CurrentCell();
2009 MarkBit::CellType current_cell = *cell;
2010 if (current_cell == 0) continue;
2013 while (current_cell != 0) {
2014 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
2015 current_cell >>= trailing_zeros;
2016 offset += trailing_zeros;
2017 Address address = cell_base + offset * kPointerSize;
2018 HeapObject* object = HeapObject::FromAddress(address);
2020 int size = object->Size();
2021 survivors_size += size;
2023 Heap::UpdateAllocationSiteFeedback(object);
2027 // Aggressively promote young survivors to the old space.
2028 if (TryPromoteObject(object, size)) {
2032 // Promotion failed. Just migrate object to another semispace.
2033 MaybeObject* allocation = new_space->AllocateRaw(size);
2034 if (allocation->IsFailure()) {
2035 if (!new_space->AddFreshPage()) {
2036 // Shouldn't happen. We are sweeping linearly, and to-space
2037 // has the same number of pages as from-space, so there is
2041 allocation = new_space->AllocateRaw(size);
2042 ASSERT(!allocation->IsFailure());
2044 Object* target = allocation->ToObjectUnchecked();
2046 MigrateObject(HeapObject::cast(target)->address(),
2053 return survivors_size;
2057 static void DiscoverGreyObjectsInSpace(Heap* heap,
2058 MarkingDeque* marking_deque,
2059 PagedSpace* space) {
2060 if (!space->was_swept_conservatively()) {
2061 HeapObjectIterator it(space);
2062 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2064 PageIterator it(space);
2065 while (it.has_next()) {
2066 Page* p = it.next();
2067 DiscoverGreyObjectsOnPage(marking_deque, p);
2068 if (marking_deque->IsFull()) return;
2074 static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2075 MarkingDeque* marking_deque) {
2076 NewSpace* space = heap->new_space();
2077 NewSpacePageIterator it(space->bottom(), space->top());
2078 while (it.has_next()) {
2079 NewSpacePage* page = it.next();
2080 DiscoverGreyObjectsOnPage(marking_deque, page);
2081 if (marking_deque->IsFull()) return;
2086 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2088 if (!o->IsHeapObject()) return false;
2089 HeapObject* heap_object = HeapObject::cast(o);
2090 MarkBit mark = Marking::MarkBitFrom(heap_object);
2095 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2098 ASSERT(o->IsHeapObject());
2099 HeapObject* heap_object = HeapObject::cast(o);
2100 MarkBit mark = Marking::MarkBitFrom(heap_object);
2105 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
2106 StringTable* string_table = heap()->string_table();
2107 // Mark the string table itself.
2108 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2109 SetMark(string_table, string_table_mark);
2110 // Explicitly mark the prefix.
2111 string_table->IteratePrefix(visitor);
2112 ProcessMarkingDeque();
2116 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2117 MarkBit mark_bit = Marking::MarkBitFrom(site);
2118 SetMark(site, mark_bit);
2122 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2123 // Mark the heap roots including global variables, stack variables,
2124 // etc., and all objects reachable from them.
2125 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2127 // Handle the string table specially.
2128 MarkStringTable(visitor);
2130 MarkWeakObjectToCodeTable();
2132 // There may be overflowed objects in the heap. Visit them now.
2133 while (marking_deque_.overflowed()) {
2134 RefillMarkingDeque();
2135 EmptyMarkingDeque();
2140 void MarkCompactCollector::MarkImplicitRefGroups() {
2141 List<ImplicitRefGroup*>* ref_groups =
2142 isolate()->global_handles()->implicit_ref_groups();
2145 for (int i = 0; i < ref_groups->length(); i++) {
2146 ImplicitRefGroup* entry = ref_groups->at(i);
2147 ASSERT(entry != NULL);
2149 if (!IsMarked(*entry->parent)) {
2150 (*ref_groups)[last++] = entry;
2154 Object*** children = entry->children;
2155 // A parent object is marked, so mark all child heap objects.
2156 for (size_t j = 0; j < entry->length; ++j) {
2157 if ((*children[j])->IsHeapObject()) {
2158 HeapObject* child = HeapObject::cast(*children[j]);
2159 MarkBit mark = Marking::MarkBitFrom(child);
2160 MarkObject(child, mark);
2164 // Once the entire group has been marked, dispose it because it's
2165 // not needed anymore.
2168 ref_groups->Rewind(last);
2172 void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2173 HeapObject* weak_object_to_code_table =
2174 HeapObject::cast(heap()->weak_object_to_code_table());
2175 if (!IsMarked(weak_object_to_code_table)) {
2176 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2177 SetMark(weak_object_to_code_table, mark);
2182 // Mark all objects reachable from the objects on the marking stack.
2183 // Before: the marking stack contains zero or more heap object pointers.
2184 // After: the marking stack is empty, and all objects reachable from the
2185 // marking stack have been marked, or are overflowed in the heap.
2186 void MarkCompactCollector::EmptyMarkingDeque() {
2187 while (!marking_deque_.IsEmpty()) {
2188 HeapObject* object = marking_deque_.Pop();
2189 ASSERT(object->IsHeapObject());
2190 ASSERT(heap()->Contains(object));
2191 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2193 Map* map = object->map();
2194 MarkBit map_mark = Marking::MarkBitFrom(map);
2195 MarkObject(map, map_mark);
2197 MarkCompactMarkingVisitor::IterateBody(map, object);
2202 // Sweep the heap for overflowed objects, clear their overflow bits, and
2203 // push them on the marking stack. Stop early if the marking stack fills
2204 // before sweeping completes. If sweeping completes, there are no remaining
2205 // overflowed objects in the heap so the overflow flag on the markings stack
2207 void MarkCompactCollector::RefillMarkingDeque() {
2208 ASSERT(marking_deque_.overflowed());
2210 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
2211 if (marking_deque_.IsFull()) return;
2213 DiscoverGreyObjectsInSpace(heap(),
2215 heap()->old_pointer_space());
2216 if (marking_deque_.IsFull()) return;
2218 DiscoverGreyObjectsInSpace(heap(),
2220 heap()->old_data_space());
2221 if (marking_deque_.IsFull()) return;
2223 DiscoverGreyObjectsInSpace(heap(),
2225 heap()->code_space());
2226 if (marking_deque_.IsFull()) return;
2228 DiscoverGreyObjectsInSpace(heap(),
2230 heap()->map_space());
2231 if (marking_deque_.IsFull()) return;
2233 DiscoverGreyObjectsInSpace(heap(),
2235 heap()->cell_space());
2236 if (marking_deque_.IsFull()) return;
2238 DiscoverGreyObjectsInSpace(heap(),
2240 heap()->property_cell_space());
2241 if (marking_deque_.IsFull()) return;
2243 LargeObjectIterator lo_it(heap()->lo_space());
2244 DiscoverGreyObjectsWithIterator(heap(),
2247 if (marking_deque_.IsFull()) return;
2249 marking_deque_.ClearOverflowed();
2253 // Mark all objects reachable (transitively) from objects on the marking
2254 // stack. Before: the marking stack contains zero or more heap object
2255 // pointers. After: the marking stack is empty and there are no overflowed
2256 // objects in the heap.
2257 void MarkCompactCollector::ProcessMarkingDeque() {
2258 EmptyMarkingDeque();
2259 while (marking_deque_.overflowed()) {
2260 RefillMarkingDeque();
2261 EmptyMarkingDeque();
2266 // Mark all objects reachable (transitively) from objects on the marking
2267 // stack including references only considered in the atomic marking pause.
2268 void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
2269 bool work_to_do = true;
2270 ASSERT(marking_deque_.IsEmpty());
2271 while (work_to_do) {
2272 isolate()->global_handles()->IterateObjectGroups(
2273 visitor, &IsUnmarkedHeapObjectWithHeap);
2274 MarkImplicitRefGroups();
2275 ProcessWeakCollections();
2276 work_to_do = !marking_deque_.IsEmpty();
2277 ProcessMarkingDeque();
2282 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2283 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2284 !it.done(); it.Advance()) {
2285 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2288 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2289 Code* code = it.frame()->LookupCode();
2290 if (!code->CanDeoptAt(it.frame()->pc())) {
2291 code->CodeIterateBody(visitor);
2293 ProcessMarkingDeque();
2300 void MarkCompactCollector::MarkLiveObjects() {
2301 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2302 // The recursive GC marker detects when it is nearing stack overflow,
2303 // and switches to a different marking system. JS interrupts interfere
2304 // with the C stack limit check.
2305 PostponeInterruptsScope postpone(isolate());
2307 bool incremental_marking_overflowed = false;
2308 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2309 if (was_marked_incrementally_) {
2310 // Finalize the incremental marking and check whether we had an overflow.
2311 // Both markers use grey color to mark overflowed objects so
2312 // non-incremental marker can deal with them as if overflow
2313 // occured during normal marking.
2314 // But incremental marker uses a separate marking deque
2315 // so we have to explicitly copy its overflow state.
2316 incremental_marking->Finalize();
2317 incremental_marking_overflowed =
2318 incremental_marking->marking_deque()->overflowed();
2319 incremental_marking->marking_deque()->ClearOverflowed();
2321 // Abort any pending incremental activities e.g. incremental sweeping.
2322 incremental_marking->Abort();
2326 ASSERT(state_ == PREPARE_GC);
2327 state_ = MARK_LIVE_OBJECTS;
2329 // The to space contains live objects, a page in from space is used as a
2331 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2332 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2333 if (FLAG_force_marking_deque_overflows) {
2334 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2336 marking_deque_.Initialize(marking_deque_start,
2338 ASSERT(!marking_deque_.overflowed());
2340 if (incremental_marking_overflowed) {
2341 // There are overflowed objects left in the heap after incremental marking.
2342 marking_deque_.SetOverflowed();
2345 PrepareForCodeFlushing();
2347 if (was_marked_incrementally_) {
2348 // There is no write barrier on cells so we have to scan them now at the end
2349 // of the incremental marking.
2351 HeapObjectIterator cell_iterator(heap()->cell_space());
2353 while ((cell = cell_iterator.Next()) != NULL) {
2354 ASSERT(cell->IsCell());
2355 if (IsMarked(cell)) {
2356 int offset = Cell::kValueOffset;
2357 MarkCompactMarkingVisitor::VisitPointer(
2359 reinterpret_cast<Object**>(cell->address() + offset));
2364 HeapObjectIterator js_global_property_cell_iterator(
2365 heap()->property_cell_space());
2367 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
2368 ASSERT(cell->IsPropertyCell());
2369 if (IsMarked(cell)) {
2370 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
2376 RootMarkingVisitor root_visitor(heap());
2377 MarkRoots(&root_visitor);
2379 ProcessTopOptimizedFrame(&root_visitor);
2381 // The objects reachable from the roots are marked, yet unreachable
2382 // objects are unmarked. Mark objects reachable due to host
2383 // application specific logic or through Harmony weak maps.
2384 ProcessEphemeralMarking(&root_visitor);
2386 // The objects reachable from the roots, weak maps or object groups
2387 // are marked, yet unreachable objects are unmarked. Mark objects
2388 // reachable only from weak global handles.
2390 // First we identify nonlive weak handles and mark them as pending
2392 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2393 &IsUnmarkedHeapObject);
2394 // Then we mark the objects and process the transitive closure.
2395 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2396 while (marking_deque_.overflowed()) {
2397 RefillMarkingDeque();
2398 EmptyMarkingDeque();
2401 // Repeat host application specific and Harmony weak maps marking to
2402 // mark unmarked objects reachable from the weak roots.
2403 ProcessEphemeralMarking(&root_visitor);
2409 void MarkCompactCollector::AfterMarking() {
2410 // Object literal map caches reference strings (cache keys) and maps
2411 // (cache values). At this point still useful maps have already been
2412 // marked. Mark the keys for the alive values before we process the
2416 // Prune the string table removing all strings only pointed to by the
2417 // string table. Cannot use string_table() here because the string
2419 StringTable* string_table = heap()->string_table();
2420 InternalizedStringTableCleaner internalized_visitor(heap());
2421 string_table->IterateElements(&internalized_visitor);
2422 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2424 ExternalStringTableCleaner external_visitor(heap());
2425 heap()->external_string_table_.Iterate(&external_visitor);
2426 heap()->external_string_table_.CleanUp();
2428 // Process the weak references.
2429 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2430 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2432 // Remove object groups after marking phase.
2433 heap()->isolate()->global_handles()->RemoveObjectGroups();
2434 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2436 // Flush code from collected candidates.
2437 if (is_code_flushing_enabled()) {
2438 code_flusher_->ProcessCandidates();
2439 // If incremental marker does not support code flushing, we need to
2440 // disable it before incremental marking steps for next cycle.
2441 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2442 EnableCodeFlushing(false);
2446 if (FLAG_track_gc_object_stats) {
2447 heap()->CheckpointObjectStats();
2452 void MarkCompactCollector::ProcessMapCaches() {
2453 Object* raw_context = heap()->native_contexts_list_;
2454 while (raw_context != heap()->undefined_value()) {
2455 Context* context = reinterpret_cast<Context*>(raw_context);
2456 if (IsMarked(context)) {
2457 HeapObject* raw_map_cache =
2458 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2459 // A map cache may be reachable from the stack. In this case
2460 // it's already transitively marked and it's too late to clean
2462 if (!IsMarked(raw_map_cache) &&
2463 raw_map_cache != heap()->undefined_value()) {
2464 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2465 int existing_elements = map_cache->NumberOfElements();
2466 int used_elements = 0;
2467 for (int i = MapCache::kElementsStartIndex;
2468 i < map_cache->length();
2469 i += MapCache::kEntrySize) {
2470 Object* raw_key = map_cache->get(i);
2471 if (raw_key == heap()->undefined_value() ||
2472 raw_key == heap()->the_hole_value()) continue;
2473 STATIC_ASSERT(MapCache::kEntrySize == 2);
2474 Object* raw_map = map_cache->get(i + 1);
2475 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2478 // Delete useless entries with unmarked maps.
2479 ASSERT(raw_map->IsMap());
2480 map_cache->set_the_hole(i);
2481 map_cache->set_the_hole(i + 1);
2484 if (used_elements == 0) {
2485 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2487 // Note: we don't actually shrink the cache here to avoid
2488 // extra complexity during GC. We rely on subsequent cache
2489 // usages (EnsureCapacity) to do this.
2490 map_cache->ElementsRemoved(existing_elements - used_elements);
2491 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2492 MarkObject(map_cache, map_cache_markbit);
2496 // Move to next element in the list.
2497 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2499 ProcessMarkingDeque();
2503 void MarkCompactCollector::ReattachInitialMaps() {
2504 HeapObjectIterator map_iterator(heap()->map_space());
2505 for (HeapObject* obj = map_iterator.Next();
2507 obj = map_iterator.Next()) {
2508 Map* map = Map::cast(obj);
2510 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2511 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2513 if (map->attached_to_shared_function_info()) {
2514 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2520 void MarkCompactCollector::ClearNonLiveReferences() {
2521 // Iterate over the map space, setting map transitions that go from
2522 // a marked map to an unmarked map to null transitions. This action
2523 // is carried out only on maps of JSObjects and related subtypes.
2524 HeapObjectIterator map_iterator(heap()->map_space());
2525 for (HeapObject* obj = map_iterator.Next();
2527 obj = map_iterator.Next()) {
2528 Map* map = Map::cast(obj);
2530 if (!map->CanTransition()) continue;
2532 MarkBit map_mark = Marking::MarkBitFrom(map);
2533 if (map_mark.Get() && map->attached_to_shared_function_info()) {
2534 // This map is used for inobject slack tracking and has been detached
2535 // from SharedFunctionInfo during the mark phase.
2536 // Since it survived the GC, reattach it now.
2537 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2540 ClearNonLivePrototypeTransitions(map);
2541 ClearNonLiveMapTransitions(map, map_mark);
2543 if (map_mark.Get()) {
2544 ClearNonLiveDependentCode(map->dependent_code());
2546 ClearAndDeoptimizeDependentCode(map->dependent_code());
2547 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
2551 // Iterate over property cell space, removing dependent code that is not
2552 // otherwise kept alive by strong references.
2553 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2554 for (HeapObject* cell = cell_iterator.Next();
2556 cell = cell_iterator.Next()) {
2557 if (IsMarked(cell)) {
2558 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2562 // Iterate over allocation sites, removing dependent code that is not
2563 // otherwise kept alive by strong references.
2564 Object* undefined = heap()->undefined_value();
2565 for (Object* site = heap()->allocation_sites_list();
2567 site = AllocationSite::cast(site)->weak_next()) {
2568 if (IsMarked(site)) {
2569 ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
2573 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2574 WeakHashTable* table =
2575 WeakHashTable::cast(heap_->weak_object_to_code_table());
2576 uint32_t capacity = table->Capacity();
2577 for (uint32_t i = 0; i < capacity; i++) {
2578 uint32_t key_index = table->EntryToIndex(i);
2579 Object* key = table->get(key_index);
2580 if (!table->IsKey(key)) continue;
2581 uint32_t value_index = table->EntryToValueIndex(i);
2582 Object* value = table->get(value_index);
2583 if (key->IsCell() && !IsMarked(key)) {
2584 Cell* cell = Cell::cast(key);
2585 Object* object = cell->value();
2586 if (IsMarked(object)) {
2587 MarkBit mark = Marking::MarkBitFrom(cell);
2588 SetMark(cell, mark);
2589 Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
2590 RecordSlot(value_slot, value_slot, *value_slot);
2593 if (IsMarked(key)) {
2594 if (!IsMarked(value)) {
2595 HeapObject* obj = HeapObject::cast(value);
2596 MarkBit mark = Marking::MarkBitFrom(obj);
2599 ClearNonLiveDependentCode(DependentCode::cast(value));
2601 ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
2602 table->set(key_index, heap_->the_hole_value());
2603 table->set(value_index, heap_->the_hole_value());
2610 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2611 int number_of_transitions = map->NumberOfProtoTransitions();
2612 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
2614 int new_number_of_transitions = 0;
2615 const int header = Map::kProtoTransitionHeaderSize;
2616 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2617 const int map_offset = header + Map::kProtoTransitionMapOffset;
2618 const int step = Map::kProtoTransitionElementsPerEntry;
2619 for (int i = 0; i < number_of_transitions; i++) {
2620 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2621 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2622 if (IsMarked(prototype) && IsMarked(cached_map)) {
2623 ASSERT(!prototype->IsUndefined());
2624 int proto_index = proto_offset + new_number_of_transitions * step;
2625 int map_index = map_offset + new_number_of_transitions * step;
2626 if (new_number_of_transitions != i) {
2627 prototype_transitions->set(
2630 UPDATE_WRITE_BARRIER);
2631 prototype_transitions->set(
2634 SKIP_WRITE_BARRIER);
2636 Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
2637 RecordSlot(slot, slot, prototype);
2638 new_number_of_transitions++;
2642 if (new_number_of_transitions != number_of_transitions) {
2643 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2646 // Fill slots that became free with undefined value.
2647 for (int i = new_number_of_transitions * step;
2648 i < number_of_transitions * step;
2650 prototype_transitions->set_undefined(header + i);
2655 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2657 Object* potential_parent = map->GetBackPointer();
2658 if (!potential_parent->IsMap()) return;
2659 Map* parent = Map::cast(potential_parent);
2661 // Follow back pointer, check whether we are dealing with a map transition
2662 // from a live map to a dead path and in case clear transitions of parent.
2663 bool current_is_alive = map_mark.Get();
2664 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2665 if (!current_is_alive && parent_is_alive) {
2666 parent->ClearNonLiveTransitions(heap());
2671 void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
2672 DependentCode* entries) {
2673 DisallowHeapAllocation no_allocation;
2674 DependentCode::GroupStartIndexes starts(entries);
2675 int number_of_entries = starts.number_of_entries();
2676 if (number_of_entries == 0) return;
2677 for (int i = 0; i < number_of_entries; i++) {
2678 // If the entry is compilation info then the map must be alive,
2679 // and ClearAndDeoptimizeDependentCode shouldn't be called.
2680 ASSERT(entries->is_code_at(i));
2681 Code* code = entries->code_at(i);
2683 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2684 code->set_marked_for_deoptimization(true);
2685 code->InvalidateEmbeddedObjects();
2686 have_code_to_deoptimize_ = true;
2688 entries->clear_at(i);
2693 void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
2694 DisallowHeapAllocation no_allocation;
2695 DependentCode::GroupStartIndexes starts(entries);
2696 int number_of_entries = starts.number_of_entries();
2697 if (number_of_entries == 0) return;
2698 int new_number_of_entries = 0;
2699 // Go through all groups, remove dead codes and compact.
2700 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2701 int group_number_of_entries = 0;
2702 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
2703 Object* obj = entries->object_at(i);
2704 ASSERT(obj->IsCode() || IsMarked(obj));
2705 if (IsMarked(obj) &&
2706 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
2707 if (new_number_of_entries + group_number_of_entries != i) {
2708 entries->set_object_at(
2709 new_number_of_entries + group_number_of_entries, obj);
2711 Object** slot = entries->slot_at(new_number_of_entries +
2712 group_number_of_entries);
2713 RecordSlot(slot, slot, obj);
2714 group_number_of_entries++;
2717 entries->set_number_of_entries(
2718 static_cast<DependentCode::DependencyGroup>(g),
2719 group_number_of_entries);
2720 new_number_of_entries += group_number_of_entries;
2722 for (int i = new_number_of_entries; i < number_of_entries; i++) {
2723 entries->clear_at(i);
2728 void MarkCompactCollector::ProcessWeakCollections() {
2729 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2730 Object* weak_collection_obj = encountered_weak_collections();
2731 while (weak_collection_obj != Smi::FromInt(0)) {
2732 ASSERT(MarkCompactCollector::IsMarked(
2733 HeapObject::cast(weak_collection_obj)));
2734 JSWeakCollection* weak_collection =
2735 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2736 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2737 Object** anchor = reinterpret_cast<Object**>(table->address());
2738 for (int i = 0; i < table->Capacity(); i++) {
2739 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2741 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2742 RecordSlot(anchor, key_slot, *key_slot);
2743 Object** value_slot =
2744 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2745 MarkCompactMarkingVisitor::MarkObjectByPointer(
2746 this, anchor, value_slot);
2749 weak_collection_obj = weak_collection->next();
2754 void MarkCompactCollector::ClearWeakCollections() {
2755 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2756 Object* weak_collection_obj = encountered_weak_collections();
2757 while (weak_collection_obj != Smi::FromInt(0)) {
2758 ASSERT(MarkCompactCollector::IsMarked(
2759 HeapObject::cast(weak_collection_obj)));
2760 JSWeakCollection* weak_collection =
2761 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2762 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2763 for (int i = 0; i < table->Capacity(); i++) {
2764 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2765 table->RemoveEntry(i);
2768 weak_collection_obj = weak_collection->next();
2769 weak_collection->set_next(Smi::FromInt(0));
2771 set_encountered_weak_collections(Smi::FromInt(0));
2775 // We scavange new space simultaneously with sweeping. This is done in two
2778 // The first pass migrates all alive objects from one semispace to another or
2779 // promotes them to old space. Forwarding address is written directly into
2780 // first word of object without any encoding. If object is dead we write
2781 // NULL as a forwarding address.
2783 // The second pass updates pointers to new space in all spaces. It is possible
2784 // to encounter pointers to dead new space objects during traversal of pointers
2785 // to new space. We should clear them to avoid encountering them during next
2786 // pointer iteration. This is an issue if the store buffer overflows and we
2787 // have to scan the entire old space, including dead objects, looking for
2788 // pointers to new space.
2789 void MarkCompactCollector::MigrateObject(Address dst,
2792 AllocationSpace dest) {
2793 HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
2794 if (heap_profiler->is_tracking_object_moves()) {
2795 heap_profiler->ObjectMoveEvent(src, dst, size);
2797 ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2798 ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2799 if (dest == OLD_POINTER_SPACE) {
2800 Address src_slot = src;
2801 Address dst_slot = dst;
2802 ASSERT(IsAligned(size, kPointerSize));
2804 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2805 Object* value = Memory::Object_at(src_slot);
2807 Memory::Object_at(dst_slot) = value;
2809 if (heap_->InNewSpace(value)) {
2810 heap_->store_buffer()->Mark(dst_slot);
2811 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2812 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2813 &migration_slots_buffer_,
2814 reinterpret_cast<Object**>(dst_slot),
2815 SlotsBuffer::IGNORE_OVERFLOW);
2818 src_slot += kPointerSize;
2819 dst_slot += kPointerSize;
2822 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2823 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2824 Address code_entry = Memory::Address_at(code_entry_slot);
2826 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2827 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2828 &migration_slots_buffer_,
2829 SlotsBuffer::CODE_ENTRY_SLOT,
2831 SlotsBuffer::IGNORE_OVERFLOW);
2834 } else if (dest == CODE_SPACE) {
2835 PROFILE(isolate(), CodeMoveEvent(src, dst));
2836 heap()->MoveBlock(dst, src, size);
2837 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2838 &migration_slots_buffer_,
2839 SlotsBuffer::RELOCATED_CODE_OBJECT,
2841 SlotsBuffer::IGNORE_OVERFLOW);
2842 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2844 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2845 heap()->MoveBlock(dst, src, size);
2847 Memory::Address_at(src) = dst;
2851 // Visitor for updating pointers from live objects in old spaces to new space.
2852 // It does not expect to encounter pointers to dead objects.
2853 class PointersUpdatingVisitor: public ObjectVisitor {
2855 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2857 void VisitPointer(Object** p) {
2861 void VisitPointers(Object** start, Object** end) {
2862 for (Object** p = start; p < end; p++) UpdatePointer(p);
2865 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2866 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2867 Object* target = rinfo->target_object();
2868 Object* old_target = target;
2869 VisitPointer(&target);
2870 // Avoid unnecessary changes that might unnecessary flush the instruction
2872 if (target != old_target) {
2873 rinfo->set_target_object(target);
2877 void VisitCodeTarget(RelocInfo* rinfo) {
2878 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2879 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2880 Object* old_target = target;
2881 VisitPointer(&target);
2882 if (target != old_target) {
2883 rinfo->set_target_address(Code::cast(target)->instruction_start());
2887 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2888 ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2889 Object* stub = rinfo->code_age_stub();
2890 ASSERT(stub != NULL);
2891 VisitPointer(&stub);
2892 if (stub != rinfo->code_age_stub()) {
2893 rinfo->set_code_age_stub(Code::cast(stub));
2897 void VisitDebugTarget(RelocInfo* rinfo) {
2898 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2899 rinfo->IsPatchedReturnSequence()) ||
2900 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2901 rinfo->IsPatchedDebugBreakSlotSequence()));
2902 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2903 VisitPointer(&target);
2904 rinfo->set_call_address(Code::cast(target)->instruction_start());
2907 static inline void UpdateSlot(Heap* heap, Object** slot) {
2908 Object* obj = *slot;
2910 if (!obj->IsHeapObject()) return;
2912 HeapObject* heap_obj = HeapObject::cast(obj);
2914 MapWord map_word = heap_obj->map_word();
2915 if (map_word.IsForwardingAddress()) {
2916 ASSERT(heap->InFromSpace(heap_obj) ||
2917 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2918 HeapObject* target = map_word.ToForwardingAddress();
2920 ASSERT(!heap->InFromSpace(target) &&
2921 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2926 inline void UpdatePointer(Object** p) {
2927 UpdateSlot(heap_, p);
2934 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2935 ASSERT(*p == object);
2937 Address old_addr = object->address();
2939 Address new_addr = Memory::Address_at(old_addr);
2941 // The new space sweep will overwrite the map word of dead objects
2942 // with NULL. In this case we do not need to transfer this entry to
2943 // the store buffer which we are rebuilding.
2944 if (new_addr != NULL) {
2945 *p = HeapObject::FromAddress(new_addr);
2947 // We have to zap this pointer, because the store buffer may overflow later,
2948 // and then we have to scan the entire heap and we don't want to find
2949 // spurious newspace pointers in the old space.
2950 // TODO(mstarzinger): This was changed to a sentinel value to track down
2951 // rare crashes, change it back to Smi::FromInt(0) later.
2952 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
2957 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2959 MapWord map_word = HeapObject::cast(*p)->map_word();
2961 if (map_word.IsForwardingAddress()) {
2962 return String::cast(map_word.ToForwardingAddress());
2965 return String::cast(*p);
2969 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2971 // TODO(hpayer): Replace that check with an assert.
2972 CHECK(object_size <= Page::kMaxRegularHeapObjectSize);
2974 OldSpace* target_space = heap()->TargetSpace(object);
2976 ASSERT(target_space == heap()->old_pointer_space() ||
2977 target_space == heap()->old_data_space());
2979 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2980 if (maybe_result->ToObject(&result)) {
2981 HeapObject* target = HeapObject::cast(result);
2982 MigrateObject(target->address(),
2985 target_space->identity());
2986 heap()->mark_compact_collector()->tracer()->
2987 increment_promoted_objects_size(object_size);
2995 void MarkCompactCollector::EvacuateNewSpace() {
2996 // There are soft limits in the allocation code, designed trigger a mark
2997 // sweep collection by failing allocations. But since we are already in
2998 // a mark-sweep allocation, there is no sense in trying to trigger one.
2999 AlwaysAllocateScope scope;
3000 heap()->CheckNewSpaceExpansionCriteria();
3002 NewSpace* new_space = heap()->new_space();
3004 // Store allocation range before flipping semispaces.
3005 Address from_bottom = new_space->bottom();
3006 Address from_top = new_space->top();
3008 // Flip the semispaces. After flipping, to space is empty, from space has
3011 new_space->ResetAllocationInfo();
3013 int survivors_size = 0;
3015 // First pass: traverse all objects in inactive semispace, remove marks,
3016 // migrate live objects and write forwarding addresses. This stage puts
3017 // new entries in the store buffer and may cause some pages to be marked
3018 // scan-on-scavenge.
3019 NewSpacePageIterator it(from_bottom, from_top);
3020 while (it.has_next()) {
3021 NewSpacePage* p = it.next();
3022 survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
3025 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3026 new_space->set_age_mark(new_space->top());
3030 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
3031 AlwaysAllocateScope always_allocate;
3032 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3033 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
3034 p->MarkSweptPrecisely();
3038 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3039 Address cell_base = it.CurrentCellBase();
3040 MarkBit::CellType* cell = it.CurrentCell();
3042 if (*cell == 0) continue;
3044 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3045 for (int i = 0; i < live_objects; i++) {
3046 Address object_addr = cell_base + offsets[i] * kPointerSize;
3047 HeapObject* object = HeapObject::FromAddress(object_addr);
3048 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
3050 int size = object->Size();
3052 MaybeObject* target = space->AllocateRaw(size);
3053 if (target->IsFailure()) {
3054 // OS refused to give us memory.
3055 V8::FatalProcessOutOfMemory("Evacuation");
3059 Object* target_object = target->ToObjectUnchecked();
3061 MigrateObject(HeapObject::cast(target_object)->address(),
3065 ASSERT(object->map_word().IsForwardingAddress());
3068 // Clear marking bits for current cell.
3071 p->ResetLiveBytes();
3075 void MarkCompactCollector::EvacuatePages() {
3076 int npages = evacuation_candidates_.length();
3077 for (int i = 0; i < npages; i++) {
3078 Page* p = evacuation_candidates_[i];
3079 // TODO(hpayer): This check is just used for debugging purpose and
3080 // should be removed or turned into an assert after investigating the
3081 // crash in concurrent sweeping.
3082 CHECK(p->IsEvacuationCandidate() ||
3083 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3084 CHECK_EQ(static_cast<int>(p->parallel_sweeping()), 0);
3085 if (p->IsEvacuationCandidate()) {
3086 // During compaction we might have to request a new page.
3087 // Check that space still have room for that.
3088 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3089 EvacuateLiveObjectsFromPage(p);
3091 // Without room for expansion evacuation is not guaranteed to succeed.
3092 // Pessimistically abandon unevacuated pages.
3093 for (int j = i; j < npages; j++) {
3094 Page* page = evacuation_candidates_[j];
3095 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3096 page->ClearEvacuationCandidate();
3097 page->SetFlag(Page::RESCAN_ON_EVACUATION);
3098 page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
3107 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3109 virtual Object* RetainAs(Object* object) {
3110 if (object->IsHeapObject()) {
3111 HeapObject* heap_object = HeapObject::cast(object);
3112 MapWord map_word = heap_object->map_word();
3113 if (map_word.IsForwardingAddress()) {
3114 return map_word.ToForwardingAddress();
3122 static inline void UpdateSlot(Isolate* isolate,
3124 SlotsBuffer::SlotType slot_type,
3126 switch (slot_type) {
3127 case SlotsBuffer::CODE_TARGET_SLOT: {
3128 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3129 rinfo.Visit(isolate, v);
3132 case SlotsBuffer::CODE_ENTRY_SLOT: {
3133 v->VisitCodeEntry(addr);
3136 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3137 HeapObject* obj = HeapObject::FromAddress(addr);
3138 Code::cast(obj)->CodeIterateBody(v);
3141 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3142 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3143 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
3146 case SlotsBuffer::JS_RETURN_SLOT: {
3147 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3148 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
3151 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3152 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3153 rinfo.Visit(isolate, v);
3165 SWEEP_AND_VISIT_LIVE_OBJECTS
3169 enum SkipListRebuildingMode {
3175 // Sweep a space precisely. After this has been done the space can
3176 // be iterated precisely, hitting only the live objects. Code space
3177 // is always swept precisely because we want to be able to iterate
3178 // over it. Map space is swept precisely, because it is not compacted.
3179 // Slots in live objects pointing into evacuation candidates are updated
3181 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3182 static void SweepPrecisely(PagedSpace* space,
3185 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3186 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3187 space->identity() == CODE_SPACE);
3188 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3190 double start_time = 0.0;
3191 if (FLAG_print_cumulative_gc_stat) {
3192 start_time = OS::TimeCurrentMillis();
3195 p->MarkSweptPrecisely();
3197 Address free_start = p->area_start();
3198 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3201 SkipList* skip_list = p->skip_list();
3202 int curr_region = -1;
3203 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3207 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3208 Address cell_base = it.CurrentCellBase();
3209 MarkBit::CellType* cell = it.CurrentCell();
3210 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3212 for ( ; live_objects != 0; live_objects--) {
3213 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
3214 if (free_end != free_start) {
3215 space->Free(free_start, static_cast<int>(free_end - free_start));
3216 #ifdef ENABLE_GDB_JIT_INTERFACE
3217 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3218 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3222 HeapObject* live_object = HeapObject::FromAddress(free_end);
3223 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3224 Map* map = live_object->map();
3225 int size = live_object->SizeFromMap(map);
3226 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3227 live_object->IterateBody(map->instance_type(), size, v);
3229 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3230 int new_region_start =
3231 SkipList::RegionNumber(free_end);
3232 int new_region_end =
3233 SkipList::RegionNumber(free_end + size - kPointerSize);
3234 if (new_region_start != curr_region ||
3235 new_region_end != curr_region) {
3236 skip_list->AddObject(free_end, size);
3237 curr_region = new_region_end;
3240 free_start = free_end + size;
3242 // Clear marking bits for current cell.
3245 if (free_start != p->area_end()) {
3246 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3247 #ifdef ENABLE_GDB_JIT_INTERFACE
3248 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3249 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3253 p->ResetLiveBytes();
3254 if (FLAG_print_cumulative_gc_stat) {
3255 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3260 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3261 Page* p = Page::FromAddress(code->address());
3263 if (p->IsEvacuationCandidate() ||
3264 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3268 Address code_start = code->address();
3269 Address code_end = code_start + code->Size();
3271 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3272 uint32_t end_index =
3273 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3275 Bitmap* b = p->markbits();
3277 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3278 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3280 MarkBit::CellType* start_cell = start_mark_bit.cell();
3281 MarkBit::CellType* end_cell = end_mark_bit.cell();
3284 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3285 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3287 if (start_cell == end_cell) {
3288 *start_cell |= start_mask & end_mask;
3290 *start_cell |= start_mask;
3291 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3294 *end_cell |= end_mask;
3297 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3306 static bool IsOnInvalidatedCodeObject(Address addr) {
3307 // We did not record any slots in large objects thus
3308 // we can safely go to the page from the slot address.
3309 Page* p = Page::FromAddress(addr);
3311 // First check owner's identity because old pointer and old data spaces
3312 // are swept lazily and might still have non-zero mark-bits on some
3314 if (p->owner()->identity() != CODE_SPACE) return false;
3316 // In code space only bits on evacuation candidates (but we don't record
3317 // any slots on them) and under invalidated code objects are non-zero.
3319 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3321 return mark_bit.Get();
3325 void MarkCompactCollector::InvalidateCode(Code* code) {
3326 if (heap_->incremental_marking()->IsCompacting() &&
3327 !ShouldSkipEvacuationSlotRecording(code)) {
3328 ASSERT(compacting_);
3330 // If the object is white than no slots were recorded on it yet.
3331 MarkBit mark_bit = Marking::MarkBitFrom(code);
3332 if (Marking::IsWhite(mark_bit)) return;
3334 invalidated_code_.Add(code);
3339 // Return true if the given code is deoptimized or will be deoptimized.
3340 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3341 return code->marked_for_deoptimization();
3345 bool MarkCompactCollector::MarkInvalidatedCode() {
3346 bool code_marked = false;
3348 int length = invalidated_code_.length();
3349 for (int i = 0; i < length; i++) {
3350 Code* code = invalidated_code_[i];
3352 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3361 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3362 int length = invalidated_code_.length();
3363 for (int i = 0; i < length; i++) {
3364 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3369 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3370 int length = invalidated_code_.length();
3371 for (int i = 0; i < length; i++) {
3372 Code* code = invalidated_code_[i];
3374 code->Iterate(visitor);
3375 SetMarkBitsUnderInvalidatedCode(code, false);
3378 invalidated_code_.Rewind(0);
3382 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3383 Heap::RelocationLock relocation_lock(heap());
3385 bool code_slots_filtering_required;
3386 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3387 code_slots_filtering_required = MarkInvalidatedCode();
3391 // We have to travers our allocation sites scratchpad which contains raw
3392 // pointers before we move objects. During new space evacauation we
3393 // gathered pretenuring statistics. The found allocation sites may not be
3394 // valid after compacting old space.
3395 heap()->ProcessPretenuringFeedback();
3398 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3402 // Second pass: find pointers to new space and update them.
3403 PointersUpdatingVisitor updating_visitor(heap());
3405 { GCTracer::Scope gc_scope(tracer_,
3406 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3407 // Update pointers in to space.
3408 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3409 heap()->new_space()->top());
3410 for (HeapObject* object = to_it.Next();
3412 object = to_it.Next()) {
3413 Map* map = object->map();
3414 object->IterateBody(map->instance_type(),
3415 object->SizeFromMap(map),
3420 { GCTracer::Scope gc_scope(tracer_,
3421 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3423 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3426 { GCTracer::Scope gc_scope(tracer_,
3427 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3428 StoreBufferRebuildScope scope(heap_,
3429 heap_->store_buffer(),
3430 &Heap::ScavengeStoreBufferCallback);
3431 heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
3435 { GCTracer::Scope gc_scope(tracer_,
3436 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3437 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3438 migration_slots_buffer_,
3439 code_slots_filtering_required);
3440 if (FLAG_trace_fragmentation) {
3441 PrintF(" migration slots buffer: %d\n",
3442 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3445 if (compacting_ && was_marked_incrementally_) {
3446 // It's difficult to filter out slots recorded for large objects.
3447 LargeObjectIterator it(heap_->lo_space());
3448 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3449 // LargeObjectSpace is not swept yet thus we have to skip
3450 // dead objects explicitly.
3451 if (!IsMarked(obj)) continue;
3453 Page* p = Page::FromAddress(obj->address());
3454 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3455 obj->Iterate(&updating_visitor);
3456 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3462 int npages = evacuation_candidates_.length();
3463 { GCTracer::Scope gc_scope(
3464 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3465 for (int i = 0; i < npages; i++) {
3466 Page* p = evacuation_candidates_[i];
3467 ASSERT(p->IsEvacuationCandidate() ||
3468 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3470 if (p->IsEvacuationCandidate()) {
3471 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3473 code_slots_filtering_required);
3474 if (FLAG_trace_fragmentation) {
3475 PrintF(" page %p slots buffer: %d\n",
3476 reinterpret_cast<void*>(p),
3477 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3480 // Important: skip list should be cleared only after roots were updated
3481 // because root iteration traverses the stack and might have to find
3482 // code objects from non-updated pc pointing into evacuation candidate.
3483 SkipList* list = p->skip_list();
3484 if (list != NULL) list->Clear();
3486 if (FLAG_gc_verbose) {
3487 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3488 reinterpret_cast<intptr_t>(p));
3490 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3491 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3493 switch (space->identity()) {
3494 case OLD_DATA_SPACE:
3495 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
3497 case OLD_POINTER_SPACE:
3498 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3499 space, p, &updating_visitor);
3502 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3503 space, p, &updating_visitor);
3513 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3515 // Update pointers from cells.
3516 HeapObjectIterator cell_iterator(heap_->cell_space());
3517 for (HeapObject* cell = cell_iterator.Next();
3519 cell = cell_iterator.Next()) {
3520 if (cell->IsCell()) {
3521 Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3525 HeapObjectIterator js_global_property_cell_iterator(
3526 heap_->property_cell_space());
3527 for (HeapObject* cell = js_global_property_cell_iterator.Next();
3529 cell = js_global_property_cell_iterator.Next()) {
3530 if (cell->IsPropertyCell()) {
3531 PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
3535 // Update the head of the native contexts list in the heap.
3536 updating_visitor.VisitPointer(heap_->native_contexts_list_address());
3538 heap_->string_table()->Iterate(&updating_visitor);
3539 updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
3540 if (heap_->weak_object_to_code_table()->IsHashTable()) {
3541 WeakHashTable* table =
3542 WeakHashTable::cast(heap_->weak_object_to_code_table());
3543 table->Iterate(&updating_visitor);
3544 table->Rehash(heap_->undefined_value());
3547 // Update pointers from external string table.
3548 heap_->UpdateReferencesInExternalStringTable(
3549 &UpdateReferenceInExternalStringTableEntry);
3551 EvacuationWeakObjectRetainer evacuation_object_retainer;
3552 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3554 // Visit invalidated code (we ignored all slots on it) and clear mark-bits
3556 ProcessInvalidatedCode(&updating_visitor);
3558 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3561 if (FLAG_verify_heap) {
3562 VerifyEvacuation(heap_);
3566 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3567 ASSERT(migration_slots_buffer_ == NULL);
3571 void MarkCompactCollector::UnlinkEvacuationCandidates() {
3572 int npages = evacuation_candidates_.length();
3573 for (int i = 0; i < npages; i++) {
3574 Page* p = evacuation_candidates_[i];
3575 if (!p->IsEvacuationCandidate()) continue;
3577 p->ClearSweptPrecisely();
3578 p->ClearSweptConservatively();
3583 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3584 int npages = evacuation_candidates_.length();
3585 for (int i = 0; i < npages; i++) {
3586 Page* p = evacuation_candidates_[i];
3587 if (!p->IsEvacuationCandidate()) continue;
3588 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3589 space->Free(p->area_start(), p->area_size());
3590 p->set_scan_on_scavenge(false);
3591 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3592 p->ResetLiveBytes();
3593 space->ReleasePage(p, false);
3595 evacuation_candidates_.Rewind(0);
3596 compacting_ = false;
3597 heap()->FreeQueuedChunks();
3601 static const int kStartTableEntriesPerLine = 5;
3602 static const int kStartTableLines = 171;
3603 static const int kStartTableInvalidLine = 127;
3604 static const int kStartTableUnusedEntry = 126;
3606 #define _ kStartTableUnusedEntry
3607 #define X kStartTableInvalidLine
3608 // Mark-bit to object start offset table.
3610 // The line is indexed by the mark bits in a byte. The first number on
3611 // the line describes the number of live object starts for the line and the
3612 // other numbers on the line describe the offsets (in words) of the object
3615 // Since objects are at least 2 words large we don't have entries for two
3616 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3617 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3628 2, 1, 3, _, _, // 10
3629 X, _, _, _, _, // 11
3630 X, _, _, _, _, // 12
3631 X, _, _, _, _, // 13
3632 X, _, _, _, _, // 14
3633 X, _, _, _, _, // 15
3634 1, 4, _, _, _, // 16
3635 2, 0, 4, _, _, // 17
3636 2, 1, 4, _, _, // 18
3637 X, _, _, _, _, // 19
3638 2, 2, 4, _, _, // 20
3639 3, 0, 2, 4, _, // 21
3640 X, _, _, _, _, // 22
3641 X, _, _, _, _, // 23
3642 X, _, _, _, _, // 24
3643 X, _, _, _, _, // 25
3644 X, _, _, _, _, // 26
3645 X, _, _, _, _, // 27
3646 X, _, _, _, _, // 28
3647 X, _, _, _, _, // 29
3648 X, _, _, _, _, // 30
3649 X, _, _, _, _, // 31
3650 1, 5, _, _, _, // 32
3651 2, 0, 5, _, _, // 33
3652 2, 1, 5, _, _, // 34
3653 X, _, _, _, _, // 35
3654 2, 2, 5, _, _, // 36
3655 3, 0, 2, 5, _, // 37
3656 X, _, _, _, _, // 38
3657 X, _, _, _, _, // 39
3658 2, 3, 5, _, _, // 40
3659 3, 0, 3, 5, _, // 41
3660 3, 1, 3, 5, _, // 42
3661 X, _, _, _, _, // 43
3662 X, _, _, _, _, // 44
3663 X, _, _, _, _, // 45
3664 X, _, _, _, _, // 46
3665 X, _, _, _, _, // 47
3666 X, _, _, _, _, // 48
3667 X, _, _, _, _, // 49
3668 X, _, _, _, _, // 50
3669 X, _, _, _, _, // 51
3670 X, _, _, _, _, // 52
3671 X, _, _, _, _, // 53
3672 X, _, _, _, _, // 54
3673 X, _, _, _, _, // 55
3674 X, _, _, _, _, // 56
3675 X, _, _, _, _, // 57
3676 X, _, _, _, _, // 58
3677 X, _, _, _, _, // 59
3678 X, _, _, _, _, // 60
3679 X, _, _, _, _, // 61
3680 X, _, _, _, _, // 62
3681 X, _, _, _, _, // 63
3682 1, 6, _, _, _, // 64
3683 2, 0, 6, _, _, // 65
3684 2, 1, 6, _, _, // 66
3685 X, _, _, _, _, // 67
3686 2, 2, 6, _, _, // 68
3687 3, 0, 2, 6, _, // 69
3688 X, _, _, _, _, // 70
3689 X, _, _, _, _, // 71
3690 2, 3, 6, _, _, // 72
3691 3, 0, 3, 6, _, // 73
3692 3, 1, 3, 6, _, // 74
3693 X, _, _, _, _, // 75
3694 X, _, _, _, _, // 76
3695 X, _, _, _, _, // 77
3696 X, _, _, _, _, // 78
3697 X, _, _, _, _, // 79
3698 2, 4, 6, _, _, // 80
3699 3, 0, 4, 6, _, // 81
3700 3, 1, 4, 6, _, // 82
3701 X, _, _, _, _, // 83
3702 3, 2, 4, 6, _, // 84
3703 4, 0, 2, 4, 6, // 85
3704 X, _, _, _, _, // 86
3705 X, _, _, _, _, // 87
3706 X, _, _, _, _, // 88
3707 X, _, _, _, _, // 89
3708 X, _, _, _, _, // 90
3709 X, _, _, _, _, // 91
3710 X, _, _, _, _, // 92
3711 X, _, _, _, _, // 93
3712 X, _, _, _, _, // 94
3713 X, _, _, _, _, // 95
3714 X, _, _, _, _, // 96
3715 X, _, _, _, _, // 97
3716 X, _, _, _, _, // 98
3717 X, _, _, _, _, // 99
3718 X, _, _, _, _, // 100
3719 X, _, _, _, _, // 101
3720 X, _, _, _, _, // 102
3721 X, _, _, _, _, // 103
3722 X, _, _, _, _, // 104
3723 X, _, _, _, _, // 105
3724 X, _, _, _, _, // 106
3725 X, _, _, _, _, // 107
3726 X, _, _, _, _, // 108
3727 X, _, _, _, _, // 109
3728 X, _, _, _, _, // 110
3729 X, _, _, _, _, // 111
3730 X, _, _, _, _, // 112
3731 X, _, _, _, _, // 113
3732 X, _, _, _, _, // 114
3733 X, _, _, _, _, // 115
3734 X, _, _, _, _, // 116
3735 X, _, _, _, _, // 117
3736 X, _, _, _, _, // 118
3737 X, _, _, _, _, // 119
3738 X, _, _, _, _, // 120
3739 X, _, _, _, _, // 121
3740 X, _, _, _, _, // 122
3741 X, _, _, _, _, // 123
3742 X, _, _, _, _, // 124
3743 X, _, _, _, _, // 125
3744 X, _, _, _, _, // 126
3745 X, _, _, _, _, // 127
3746 1, 7, _, _, _, // 128
3747 2, 0, 7, _, _, // 129
3748 2, 1, 7, _, _, // 130
3749 X, _, _, _, _, // 131
3750 2, 2, 7, _, _, // 132
3751 3, 0, 2, 7, _, // 133
3752 X, _, _, _, _, // 134
3753 X, _, _, _, _, // 135
3754 2, 3, 7, _, _, // 136
3755 3, 0, 3, 7, _, // 137
3756 3, 1, 3, 7, _, // 138
3757 X, _, _, _, _, // 139
3758 X, _, _, _, _, // 140
3759 X, _, _, _, _, // 141
3760 X, _, _, _, _, // 142
3761 X, _, _, _, _, // 143
3762 2, 4, 7, _, _, // 144
3763 3, 0, 4, 7, _, // 145
3764 3, 1, 4, 7, _, // 146
3765 X, _, _, _, _, // 147
3766 3, 2, 4, 7, _, // 148
3767 4, 0, 2, 4, 7, // 149
3768 X, _, _, _, _, // 150
3769 X, _, _, _, _, // 151
3770 X, _, _, _, _, // 152
3771 X, _, _, _, _, // 153
3772 X, _, _, _, _, // 154
3773 X, _, _, _, _, // 155
3774 X, _, _, _, _, // 156
3775 X, _, _, _, _, // 157
3776 X, _, _, _, _, // 158
3777 X, _, _, _, _, // 159
3778 2, 5, 7, _, _, // 160
3779 3, 0, 5, 7, _, // 161
3780 3, 1, 5, 7, _, // 162
3781 X, _, _, _, _, // 163
3782 3, 2, 5, 7, _, // 164
3783 4, 0, 2, 5, 7, // 165
3784 X, _, _, _, _, // 166
3785 X, _, _, _, _, // 167
3786 3, 3, 5, 7, _, // 168
3787 4, 0, 3, 5, 7, // 169
3788 4, 1, 3, 5, 7 // 170
3794 // Takes a word of mark bits. Returns the number of objects that start in the
3795 // range. Puts the offsets of the words in the supplied array.
3796 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3800 // No consecutive 1 bits.
3801 ASSERT((mark_bits & 0x180) != 0x180);
3802 ASSERT((mark_bits & 0x18000) != 0x18000);
3803 ASSERT((mark_bits & 0x1800000) != 0x1800000);
3805 while (mark_bits != 0) {
3806 int byte = (mark_bits & 0xff);
3809 ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3810 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3811 int objects_in_these_8_words = table[0];
3812 ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3813 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3814 for (int i = 0; i < objects_in_these_8_words; i++) {
3815 starts[objects++] = offset + table[1 + i];
3824 static inline Address DigestFreeStart(Address approximate_free_start,
3825 uint32_t free_start_cell) {
3826 ASSERT(free_start_cell != 0);
3828 // No consecutive 1 bits.
3829 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3832 uint32_t cell = free_start_cell;
3833 int offset_of_last_live;
3834 if ((cell & 0x80000000u) != 0) {
3835 // This case would overflow below.
3836 offset_of_last_live = 31;
3838 // Remove all but one bit, the most significant. This is an optimization
3839 // that may or may not be worthwhile.
3845 cell = (cell + 1) >> 1;
3846 int live_objects = MarkWordToObjectStarts(cell, offsets);
3847 ASSERT(live_objects == 1);
3848 offset_of_last_live = offsets[live_objects - 1];
3850 Address last_live_start =
3851 approximate_free_start + offset_of_last_live * kPointerSize;
3852 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3853 Address free_start = last_live_start + last_live->Size();
3858 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3861 // No consecutive 1 bits.
3862 ASSERT((cell & (cell << 1)) == 0);
3865 if (cell == 0x80000000u) { // Avoid overflow below.
3866 return block_address + 31 * kPointerSize;
3868 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3869 ASSERT((first_set_bit & cell) == first_set_bit);
3870 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3871 ASSERT(live_objects == 1);
3873 return block_address + offsets[0] * kPointerSize;
3877 template<MarkCompactCollector::SweepingParallelism mode>
3878 static intptr_t Free(PagedSpace* space,
3879 FreeList* free_list,
3882 if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
3883 return space->Free(start, size);
3885 return size - free_list->Free(start, size);
3890 // Force instantiation of templatized SweepConservatively method for
3891 // SWEEP_SEQUENTIALLY mode.
3892 template intptr_t MarkCompactCollector::
3893 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
3894 PagedSpace*, FreeList*, Page*);
3897 // Force instantiation of templatized SweepConservatively method for
3898 // SWEEP_IN_PARALLEL mode.
3899 template intptr_t MarkCompactCollector::
3900 SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
3901 PagedSpace*, FreeList*, Page*);
3904 // Sweeps a space conservatively. After this has been done the larger free
3905 // spaces have been put on the free list and the smaller ones have been
3906 // ignored and left untouched. A free space is always either ignored or put
3907 // on the free list, never split up into two parts. This is important
3908 // because it means that any FreeSpace maps left actually describe a region of
3909 // memory that can be ignored when scanning. Dead objects other than free
3910 // spaces will not contain the free space map.
3911 template<MarkCompactCollector::SweepingParallelism mode>
3912 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
3913 FreeList* free_list,
3915 // TODO(hpayer): This check is just used for debugging purpose and
3916 // should be removed or turned into an assert after investigating the
3917 // crash in concurrent sweeping.
3918 CHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3919 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
3920 free_list != NULL) ||
3921 (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
3922 free_list == NULL));
3924 p->MarkSweptConservatively();
3926 intptr_t freed_bytes = 0;
3929 // Skip over all the dead objects at the start of the page and mark them free.
3930 Address cell_base = 0;
3931 MarkBit::CellType* cell = NULL;
3932 MarkBitCellIterator it(p);
3933 for (; !it.Done(); it.Advance()) {
3934 cell_base = it.CurrentCellBase();
3935 cell = it.CurrentCell();
3936 if (*cell != 0) break;
3940 size = p->area_end() - p->area_start();
3941 freed_bytes += Free<mode>(space, free_list, p->area_start(),
3942 static_cast<int>(size));
3943 ASSERT_EQ(0, p->LiveBytes());
3947 // Grow the size of the start-of-page free space a little to get up to the
3948 // first live object.
3949 Address free_end = StartOfLiveObject(cell_base, *cell);
3950 // Free the first free space.
3951 size = free_end - p->area_start();
3952 freed_bytes += Free<mode>(space, free_list, p->area_start(),
3953 static_cast<int>(size));
3955 // The start of the current free area is represented in undigested form by
3956 // the address of the last 32-word section that contained a live object and
3957 // the marking bitmap for that cell, which describes where the live object
3958 // started. Unless we find a large free space in the bitmap we will not
3959 // digest this pair into a real address. We start the iteration here at the
3960 // first word in the marking bit map that indicates a live object.
3961 Address free_start = cell_base;
3962 MarkBit::CellType free_start_cell = *cell;
3964 for (; !it.Done(); it.Advance()) {
3965 cell_base = it.CurrentCellBase();
3966 cell = it.CurrentCell();
3968 // We have a live object. Check approximately whether it is more than 32
3969 // words since the last live object.
3970 if (cell_base - free_start > 32 * kPointerSize) {
3971 free_start = DigestFreeStart(free_start, free_start_cell);
3972 if (cell_base - free_start > 32 * kPointerSize) {
3973 // Now that we know the exact start of the free space it still looks
3974 // like we have a large enough free space to be worth bothering with.
3975 // so now we need to find the start of the first live object at the
3976 // end of the free space.
3977 free_end = StartOfLiveObject(cell_base, *cell);
3978 freed_bytes += Free<mode>(space, free_list, free_start,
3979 static_cast<int>(free_end - free_start));
3982 // Update our undigested record of where the current free area started.
3983 free_start = cell_base;
3984 free_start_cell = *cell;
3985 // Clear marking bits for current cell.
3990 // Handle the free space at the end of the page.
3991 if (cell_base - free_start > 32 * kPointerSize) {
3992 free_start = DigestFreeStart(free_start, free_start_cell);
3993 freed_bytes += Free<mode>(space, free_list, free_start,
3994 static_cast<int>(p->area_end() - free_start));
3997 p->ResetLiveBytes();
4002 void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
4003 PageIterator it(space);
4004 FreeList* free_list = space == heap()->old_pointer_space()
4005 ? free_list_old_pointer_space_.get()
4006 : free_list_old_data_space_.get();
4007 FreeList private_free_list(space);
4008 while (it.has_next()) {
4009 Page* p = it.next();
4011 if (p->TryParallelSweeping()) {
4012 SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
4013 free_list->Concatenate(&private_free_list);
4019 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4020 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
4021 sweeper == LAZY_CONSERVATIVE ||
4022 sweeper == PARALLEL_CONSERVATIVE ||
4023 sweeper == CONCURRENT_CONSERVATIVE);
4024 space->ClearStats();
4026 PageIterator it(space);
4028 int pages_swept = 0;
4029 bool lazy_sweeping_active = false;
4030 bool unused_page_present = false;
4031 bool parallel_sweeping_active = false;
4033 while (it.has_next()) {
4034 Page* p = it.next();
4036 ASSERT(p->parallel_sweeping() == 0);
4037 ASSERT(!p->IsEvacuationCandidate());
4039 // Clear sweeping flags indicating that marking bits are still intact.
4040 p->ClearSweptPrecisely();
4041 p->ClearSweptConservatively();
4043 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
4044 // Will be processed in EvacuateNewSpaceAndCandidates.
4045 ASSERT(evacuation_candidates_.length() > 0);
4049 // One unused page is kept, all further are released before sweeping them.
4050 if (p->LiveBytes() == 0) {
4051 if (unused_page_present) {
4052 if (FLAG_gc_verbose) {
4053 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
4054 reinterpret_cast<intptr_t>(p));
4056 // Adjust unswept free bytes because releasing a page expects said
4057 // counter to be accurate for unswept pages.
4058 space->IncreaseUnsweptFreeBytes(p);
4059 space->ReleasePage(p, true);
4062 unused_page_present = true;
4066 case CONSERVATIVE: {
4067 if (FLAG_gc_verbose) {
4068 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4069 reinterpret_cast<intptr_t>(p));
4071 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4075 case LAZY_CONSERVATIVE: {
4076 if (lazy_sweeping_active) {
4077 if (FLAG_gc_verbose) {
4078 PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
4079 reinterpret_cast<intptr_t>(p));
4081 space->IncreaseUnsweptFreeBytes(p);
4083 if (FLAG_gc_verbose) {
4084 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4085 reinterpret_cast<intptr_t>(p));
4087 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4089 space->SetPagesToSweep(p->next_page());
4090 lazy_sweeping_active = true;
4094 case CONCURRENT_CONSERVATIVE:
4095 case PARALLEL_CONSERVATIVE: {
4096 if (!parallel_sweeping_active) {
4097 if (FLAG_gc_verbose) {
4098 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4099 reinterpret_cast<intptr_t>(p));
4101 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
4103 parallel_sweeping_active = true;
4105 if (FLAG_gc_verbose) {
4106 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
4107 reinterpret_cast<intptr_t>(p));
4109 p->set_parallel_sweeping(1);
4110 space->IncreaseUnsweptFreeBytes(p);
4115 if (FLAG_gc_verbose) {
4116 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
4117 reinterpret_cast<intptr_t>(p));
4119 if (space->identity() == CODE_SPACE) {
4120 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
4122 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
4133 if (FLAG_gc_verbose) {
4134 PrintF("SweepSpace: %s (%d pages swept)\n",
4135 AllocationSpaceName(space->identity()),
4139 // Give pages that are queued to be freed back to the OS.
4140 heap()->FreeQueuedChunks();
4144 void MarkCompactCollector::SweepSpaces() {
4145 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4147 state_ = SWEEP_SPACES;
4149 SweeperType how_to_sweep =
4150 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
4151 if (isolate()->num_sweeper_threads() > 0) {
4152 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4153 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4155 if (sweep_precisely_) how_to_sweep = PRECISE;
4157 // Unlink evacuation candidates before sweeper threads access the list of
4158 // pages to avoid race condition.
4159 UnlinkEvacuationCandidates();
4161 // Noncompacting collections simply sweep the spaces to clear the mark
4162 // bits and free the nonlive blocks (for old and map spaces). We sweep
4163 // the map space last because freeing non-live maps overwrites them and
4164 // the other spaces rely on possibly non-live maps to get the sizes for
4165 // non-live objects.
4166 SequentialSweepingScope scope(this);
4167 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4168 SweepSpace(heap()->old_data_space(), how_to_sweep);
4170 if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4171 how_to_sweep == CONCURRENT_CONSERVATIVE) {
4172 // TODO(hpayer): fix race with concurrent sweeper
4173 StartSweeperThreads();
4176 if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4177 WaitUntilSweepingCompleted();
4180 RemoveDeadInvalidatedCode();
4181 SweepSpace(heap()->code_space(), PRECISE);
4183 SweepSpace(heap()->cell_space(), PRECISE);
4184 SweepSpace(heap()->property_cell_space(), PRECISE);
4186 EvacuateNewSpaceAndCandidates();
4188 // ClearNonLiveTransitions depends on precise sweeping of map space to
4189 // detect whether unmarked map became dead in this collection or in one
4190 // of the previous ones.
4191 SweepSpace(heap()->map_space(), PRECISE);
4193 // Deallocate unmarked objects and clear marked bits for marked objects.
4194 heap_->lo_space()->FreeUnmarkedObjects();
4196 // Deallocate evacuated candidate pages.
4197 ReleaseEvacuationCandidates();
4201 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
4202 #ifdef ENABLE_DEBUGGER_SUPPORT
4203 if (isolate()->debug()->IsLoaded() ||
4204 isolate()->debug()->has_break_points()) {
4210 if (code_flusher_ != NULL) return;
4211 code_flusher_ = new CodeFlusher(isolate());
4213 if (code_flusher_ == NULL) return;
4214 code_flusher_->EvictAllCandidates();
4215 delete code_flusher_;
4216 code_flusher_ = NULL;
4219 if (FLAG_trace_code_flushing) {
4220 PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
4225 // TODO(1466) ReportDeleteIfNeeded is not called currently.
4226 // Our profiling tools do not expect intersections between
4227 // code objects. We should either reenable it or change our tools.
4228 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4230 #ifdef ENABLE_GDB_JIT_INTERFACE
4231 if (obj->IsCode()) {
4232 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
4235 if (obj->IsCode()) {
4236 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4241 Isolate* MarkCompactCollector::isolate() const {
4242 return heap_->isolate();
4246 void MarkCompactCollector::Initialize() {
4247 MarkCompactMarkingVisitor::Initialize();
4248 IncrementalMarking::Initialize();
4252 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
4253 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
4257 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
4258 SlotsBuffer** buffer_address,
4261 AdditionMode mode) {
4262 SlotsBuffer* buffer = *buffer_address;
4263 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
4264 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
4265 allocator->DeallocateChain(buffer_address);
4268 buffer = allocator->AllocateBuffer(buffer);
4269 *buffer_address = buffer;
4271 ASSERT(buffer->HasSpaceForTypedSlot());
4272 buffer->Add(reinterpret_cast<ObjectSlot>(type));
4273 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
4278 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
4279 if (RelocInfo::IsCodeTarget(rmode)) {
4280 return SlotsBuffer::CODE_TARGET_SLOT;
4281 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4282 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4283 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4284 return SlotsBuffer::DEBUG_TARGET_SLOT;
4285 } else if (RelocInfo::IsJSReturn(rmode)) {
4286 return SlotsBuffer::JS_RETURN_SLOT;
4289 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4293 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4294 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4295 if (target_page->IsEvacuationCandidate() &&
4296 (rinfo->host() == NULL ||
4297 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4298 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4299 target_page->slots_buffer_address(),
4300 SlotTypeForRMode(rinfo->rmode()),
4302 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4303 EvictEvacuationCandidate(target_page);
4309 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4310 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4311 if (target_page->IsEvacuationCandidate() &&
4312 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4313 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4314 target_page->slots_buffer_address(),
4315 SlotsBuffer::CODE_ENTRY_SLOT,
4317 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4318 EvictEvacuationCandidate(target_page);
4324 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4325 ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
4326 if (is_compacting()) {
4327 Code* host = isolate()->inner_pointer_to_code_cache()->
4328 GcSafeFindCodeForInnerPointer(pc);
4329 MarkBit mark_bit = Marking::MarkBitFrom(host);
4330 if (Marking::IsBlack(mark_bit)) {
4331 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4332 RecordRelocSlot(&rinfo, target);
4338 static inline SlotsBuffer::SlotType DecodeSlotType(
4339 SlotsBuffer::ObjectSlot slot) {
4340 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4344 void SlotsBuffer::UpdateSlots(Heap* heap) {
4345 PointersUpdatingVisitor v(heap);
4347 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4348 ObjectSlot slot = slots_[slot_idx];
4349 if (!IsTypedSlot(slot)) {
4350 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4353 ASSERT(slot_idx < idx_);
4354 UpdateSlot(heap->isolate(),
4356 DecodeSlotType(slot),
4357 reinterpret_cast<Address>(slots_[slot_idx]));
4363 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4364 PointersUpdatingVisitor v(heap);
4366 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4367 ObjectSlot slot = slots_[slot_idx];
4368 if (!IsTypedSlot(slot)) {
4369 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4370 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4374 ASSERT(slot_idx < idx_);
4375 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4376 if (!IsOnInvalidatedCodeObject(pc)) {
4377 UpdateSlot(heap->isolate(),
4379 DecodeSlotType(slot),
4380 reinterpret_cast<Address>(slots_[slot_idx]));
4387 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4388 return new SlotsBuffer(next_buffer);
4392 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4397 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4398 SlotsBuffer* buffer = *buffer_address;
4399 while (buffer != NULL) {
4400 SlotsBuffer* next_buffer = buffer->next();
4401 DeallocateBuffer(buffer);
4402 buffer = next_buffer;
4404 *buffer_address = NULL;
4408 } } // namespace v8::internal