// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";


// -------------------------------------------------------------------------
// MarkCompactCollector
MarkCompactCollector::MarkCompactCollector() :  // NOLINT
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      was_marked_incrementally_(false),
      flush_monomorphic_ics_(false),
      migration_slots_buffer_(NULL),
      encountered_weak_maps_(NULL),
      marker_(this, this) { }


class VerifyMarkingVisitor: public ObjectVisitor {
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));


static void VerifyMarking(Address bottom, Address top) {
  VerifyMarkingVisitor visitor;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  ASSERT_EQ(space->bottom(),
            NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    ASSERT(limit == end || !page->Contains(end));
    VerifyMarking(page->area_start(), limit);


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    VerifyMarking(p->area_start(), p->area_end());


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor;

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);


class VerifyEvacuationVisitor: public ObjectVisitor {
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    ASSERT(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  ASSERT(evacuation_candidates_.length() == 0);

  CollectEvacuationCandidates(heap()->old_pointer_space());
  CollectEvacuationCandidates(heap()->old_data_space());

  if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
    CollectEvacuationCandidates(heap()->code_space());
  } else if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->code_space());

  if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->map_space());
    TraceFragmentation(heap()->cell_space());

  heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
  heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
  heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

  compacting_ = evacuation_candidates_.length() > 0;


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  ASSERT(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  if (FLAG_verify_heap) {
    VerifyMarking(heap_);

  if (!FLAG_collect_maps) ReattachInitialMaps();


void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    ASSERT(Marking::IsWhite(mark_bit));
    ASSERT_EQ(0, Page::FromAddress(obj->address())->LiveBytes());


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetLiveBytes();


bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

  ObjectColor old_color = Color(old_mark_bit);

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();

  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);


const char* AllocationSpaceName(AllocationSpace space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise a positive integer that gives an
// estimate of fragmentation on an arbitrary scale.
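// As a rough illustration (hypothetical numbers, not taken from a real heap):
// for a non-code-space page with area_size = 1000000, small_size_ = 40000 and
// medium_size_ = 60000, the ratio computed below is
// (40000 * 5 + 60000) * 100 / 1000000 = 26. With ratio_threshold = 15 the
// page counts as fragmented and the function returns 26 - 15 = 11; a page
// scoring 15 or less returns 0 and is left alone.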
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             static_cast<int>(p->LiveBytes()));

  FreeList::SizeStats sizes;
  space->CountFreeListItems(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) / area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) / area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) / area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) / area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");

  if (FLAG_always_compact && sizes.Total() != area_size) {

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();

  const int kMaxMaxEvacuationCandidates = 1000;
  int max_evacuation_candidates = Min(
      kMaxMaxEvacuationCandidates,
      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;

    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

  enum CompactionMode {
    REDUCE_MEMORY_FOOTPRINT

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (over_reserved >= 2 * space->AreaSize() &&
      reduce_memory_footprint_) {
    mode = REDUCE_MEMORY_FOOTPRINT;

    // We expect that empty pages are easier to compact so slightly bump the
    max_evacuation_candidates += 2;

    if (FLAG_trace_fragmentation) {
      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
             static_cast<double>(over_reserved) / MB,
             static_cast<int>(kFreenessThreshold));

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= ((over_reserved * 3) / 4)) {

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
        FreeList::SizeStats sizes;
        space->CountFreeListItems(p, &sizes);
        free_bytes = sizes.Total();

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += 2 * p->area_size() - free_bytes;
        fragmentation = free_pct;

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");

      fragmentation = FreeListFragmentation(space, p);

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
        for (int i = 0; i < max_evacuation_candidates; i++) {
              candidates[i].fragmentation() < least->fragmentation()) {
            least = candidates + i;
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           AllocationSpaceName(space->identity()));


void MarkCompactCollector::AbortCompaction() {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);

  ASSERT_EQ(0, evacuation_candidates_.length());


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Monomorphic ICs are preserved when possible, but need to be flushed
  // when they might be keeping a Context alive, or when the heap is about
  flush_monomorphic_ics_ =
      heap()->isolate()->context_exit_happened() || Serializer::enabled();

  // Rather than passing the tracer around we stash it in a static member
  ASSERT(state_ == IDLE);

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If GDBJIT interface is active disable compaction.
    compacting_collection_ = false;

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    was_marked_incrementally_ = false;

  // Don't start compaction if we are in the middle of incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);

  for (PagedSpace* space = spaces.next();
       space = spaces.next()) {
    space->PrepareForMarkCompact();

  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();


void MarkCompactCollector::Finish() {
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  heap()->isolate()->stub_cache()->Clear();

  heap()->external_string_table_.CleanUp();


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.
//
// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
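// The sketch below is illustrative only and is not part of the collector: it
// restates the overflow protocol described above on a toy object graph, using
// a small fixed-size stack in place of the marking deque. The names
// (marking_sketch, Node, kStackCapacity, MarkReachable) are hypothetical
// stand-ins introduced only for this example.
namespace marking_sketch {

struct Node {
  Node* children[2];
  int child_count;
  bool marked;      // Object has been reached.
  bool overflowed;  // Reached, but children not yet visited (stack was full).
};

static const int kStackCapacity = 4;

// Marks every node reachable from |root| using a bounded stack plus an
// overflow flag, mirroring the description above.
static void MarkReachable(Node* root, Node** all_nodes, int node_count) {
  Node* stack[kStackCapacity];
  int top = 0;
  bool overflow = false;

  root->marked = true;
  stack[top++] = root;

  do {
    // Drain the stack, pushing newly marked children while there is room.
    while (top > 0) {
      Node* current = stack[--top];
      for (int i = 0; i < current->child_count; i++) {
        Node* child = current->children[i];
        if (child == NULL || child->marked) continue;
        child->marked = true;
        if (top < kStackCapacity) {
          stack[top++] = child;
        } else {
          child->overflowed = true;
          overflow = true;
        }
      }
    }
    // After the stack is empty, rescan the "heap" for overflowed objects,
    // refill the stack and repeat until no overflowed objects remain.
    if (overflow) {
      overflow = false;
      for (int i = 0; i < node_count; i++) {
        if (!all_nodes[i]->overflowed) continue;
        if (top == kStackCapacity) {
          overflow = true;  // More overflowed nodes remain; rescan later.
          break;
        }
        all_nodes[i]->overflowed = false;
        stack[top++] = all_nodes[i];
      }
    }
  } while (top > 0);
}

}  // namespace marking_sketch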
  explicit CodeFlusher(Isolate* isolate)
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();

  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->shared();

      Code* code = shared->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
        candidate->set_code(shared->code());

      // We are in the middle of a GC cycle so the write barrier in the code
      // setter did not record the slot update and we have to do that manually.
      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
      isolate_->heap()->mark_compact_collector()->
          RecordCodeEntrySlot(slot, target);

      RecordSharedFunctionInfoCodeSlot(shared);

      candidate = next_candidate;

    jsfunction_candidates_head_ = NULL;

  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        candidate->set_code(lazy_compile);

      RecordSharedFunctionInfoCodeSlot(candidate);

      candidate = next_candidate;

    shared_function_info_candidates_head_ = NULL;

  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
    Object** slot = HeapObject::RawField(shared,
                                         SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(slot, slot, HeapObject::cast(*slot));

  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;

  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kGCMetadataOffset);

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return reinterpret_cast<SharedFunctionInfo*>(
        candidate->code()->gc_metadata());

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);

  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  return HeapObject::cast(first);
class StaticMarkingVisitor : public StaticVisitorBase {
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,

    table_.Register(kVisitSlicedString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SlicedString::BodyDescriptor,

    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,

    table_.Register(kVisitGlobalContext, &VisitGlobalContext);

    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,

    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitJSRegExp,
                    &VisitRegExpAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStructGeneric>();

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.

    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);

  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
    JSGlobalPropertyCell* cell =
        JSGlobalPropertyCell::cast(rinfo->target_cell());
    MarkBit mark = Marking::MarkBitFrom(cell);
    heap->mark_compact_collector()->MarkObject(cell, mark);

  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    // TODO(mstarzinger): We do not short-circuit cons strings here, verify
    // that there can be no such embedded pointers and add assertion here.
    HeapObject* object = HeapObject::cast(rinfo->target_object());
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_state() == MEGAMORPHIC ||
            heap->mark_compact_collector()->flush_monomorphic_ics_ ||
            target->ic_age() != heap->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());

    MarkBit code_mark = Marking::MarkBitFrom(target);
    heap->mark_compact_collector()->MarkObject(target, code_mark);
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);

  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkBit code_mark = Marking::MarkBitFrom(target);
    heap->mark_compact_collector()->MarkObject(target, code_mark);
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));

    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitExternalReference(RelocInfo* rinfo) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

  class DataObjectVisitor {
    static void VisitSpecialized(Map* map, HeapObject* object) {

    static void Visit(Map* map, HeapObject* object) {

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    if (weak_map->next() == Smi::FromInt(0)) {
      weak_map->set_next(collector->encountered_weak_maps());
      collector->set_encountered_weak_maps(weak_map);

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        JSWeakMap::kTableOffset + kPointerSize,

    // Mark the backing hash table without pushing it on the marking stack.
    Object* table_object = weak_map->table();
    if (!table_object->IsHashTable()) return;
    ObjectHashTable* table = ObjectHashTable::cast(table_object);
    Object** table_slot =
        HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
    MarkBit table_mark = Marking::MarkBitFrom(table);
    collector->RecordSlot(table_slot, table_slot, table);
    if (!table_mark.Get()) collector->SetMark(table, table_mark);
    // Recording the map slot can be skipped, because maps are not compacted.
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
    ASSERT(MarkCompactCollector::IsMarked(table->map()));

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    if (FLAG_cleanup_code_caches_at_gc) {
      code->ClearTypeFeedbackCells(heap);

    code->CodeIterateBody<StaticMarkingVisitor>(heap);

  // Code flushing support.
  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  static const int kRegExpCodeThreshold = 5;

  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
    Object* undefined = heap->undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);

  inline static bool IsCompiled(JSFunction* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);

  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    MarkBit code_mark = Marking::MarkBitFrom(function->code());
    if (code_mark.Get()) {
      if (!Marking::MarkBitFrom(shared_info).Get()) {
        shared_info->set_code_age(0);

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->code()) {

    return IsFlushable(heap, shared_info);

  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    MarkBit code_mark =
        Marking::MarkBitFrom(shared_info->code());
    if (code_mark.Get()) {

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsFunctionTemplateInfo()) {

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) {

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) {
    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) {

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);


  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(heap, function)) return false;

    // This function's code looks flushable. But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);

  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    return ctx->IsContext() &&
        !Context::cast(ctx)->global()->IsJSBuiltinsObject();

  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    FixedBodyVisitor<StaticMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot =
          data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->
          RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),

  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
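  //
  // As a concrete (hypothetical) walk-through of that scheme: suppose
  // sweep_generation() is 42 when a compiled Irregexp code object is visited
  // without having been used. The code slot in the data array is then replaced
  // by Smi 42 and the real code is stashed in the saved-code slot. If the
  // RegExp stays unused, the flushing check in UpdateRegExpCodeAgeAndFlush
  // above fires at the collection where sweep_generation() is 47, since
  // 47 - kRegExpCodeThreshold == 42, and both slots are reset to
  // kUninitializedValue; using the RegExp before then simply reinstates the
  // saved code.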
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExpFields(map, object);

    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ASCII and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExpFields(map, object);

  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                  HeapObject* object) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
    if (shared->ic_age() != heap->global_ic_age()) {
      shared->ResetForNewContext(heap->global_ic_age());

    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitSharedFunctionInfoGeneric(map, object);

    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);

  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
      Map* map, HeapObject* object, bool known_flush_code_candidate) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    if (!known_flush_code_candidate) {
      known_flush_code_candidate = IsFlushable(heap, shared);
      if (known_flush_code_candidate) {
        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);

    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);

  static void VisitCodeEntry(Heap* heap, Address entry_address) {
    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
    MarkBit mark = Marking::MarkBitFrom(code);
    heap->mark_compact_collector()->MarkObject(code, mark);
    heap->mark_compact_collector()->
        RecordCodeEntrySlot(entry_address, code);

  static void VisitGlobalContext(Map* map, HeapObject* object) {
    FixedBodyVisitor<StaticMarkingVisitor,
                     Context::MarkCompactBodyDescriptor,
                     void>::Visit(map, object);

    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    for (int idx = Context::FIRST_WEAK_SLOT;
         idx < Context::GLOBAL_CONTEXT_SLOTS;
          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
      collector->RecordSlot(slot, slot, *slot);

  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSFunction(map, object);

    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    bool flush_code_candidate = false;
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);

    if (!flush_code_candidate) {
      Code* code = jsfunction->shared()->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      collector->MarkObject(code, code_mark);

      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
        collector->MarkInlinedFunctionsCode(jsfunction->code());

    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          flush_code_candidate);

  static void VisitJSFunction(Map* map, HeapObject* object) {
    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),


#define SLOT_ADDR(obj, offset) \
  reinterpret_cast<Object**>((obj)->address() + offset)


  static inline void VisitJSFunctionFields(Map* map,
                                           bool flush_code_candidate) {
    Heap* heap = map->GetHeap();

        HeapObject::RawField(object, JSFunction::kPropertiesOffset),
        HeapObject::RawField(object, JSFunction::kCodeEntryOffset));

    if (!flush_code_candidate) {
      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);

      // Don't visit code object.
      // Visit shared function info to avoid double checking of its
      SharedFunctionInfo* shared_info = object->unchecked_shared();
      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
      if (!shared_info_mark.Get()) {
        Map* shared_info_map = shared_info->map();
        MarkBit shared_info_map_mark =
            Marking::MarkBitFrom(shared_info_map);
        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
        heap->mark_compact_collector()->MarkObject(shared_info_map,
                                                   shared_info_map_mark);
        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,

        HeapObject::RawField(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
        HeapObject::RawField(object,
                             JSFunction::kNonWeakFieldsEndOffset));

  static inline void VisitJSRegExpFields(Map* map,
                                         HeapObject* object) {
    int last_property_offset =
        JSRegExp::kSize + kPointerSize * map->inobject_properties();
    VisitPointers(map->GetHeap(),
                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                  SLOT_ADDR(object, last_property_offset));

  static void VisitSharedFunctionInfoFields(Heap* heap,
                                            bool flush_code_candidate) {
    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));

    if (!flush_code_candidate) {
      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));

                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
                  SLOT_ADDR(object, SharedFunctionInfo::kSize));

  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(heap_, p);

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(heap_, start, end);


class CodeMarkingVisitor : public ThreadVisitor {
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);

  MarkCompactCollector* collector_;


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);

  MarkCompactCollector* collector_;


void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
  // For optimized functions we should retain both the non-optimized version
  // of its code and the non-optimized versions of all inlined functions.
  // This is required to support bailing out from inlined code.
  DeoptimizationInputData* data =
      DeoptimizationInputData::cast(code->deoptimization_data());

  FixedArray* literals = data->LiteralArray();

  for (int i = 0, count = data->InlinedFunctionCount()->value();
    JSFunction* inlined = JSFunction::cast(literals->get(i));
    Code* inlined_code = inlined->shared()->code();
    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
    MarkObject(inlined_code, inlined_code_mark);


void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function and StackFrame::LookupCode will return
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      MarkInlinedFunctionsCode(frame->LookupCode());


void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap() == Isolate::Current()->heap());

  // TODO(1609) Currently incremental marker does not support code flushing.
  if (!FLAG_flush_code || was_marked_incrementally_) {
    EnableCodeFlushing(false);

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    EnableCodeFlushing(false);

  EnableCodeFlushing(true);

  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
                               heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingDeque();


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();
    collector_->SetMark(object, mark_bit);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingDeque();

  MarkCompactCollector* collector_;


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
  explicit SymbolTableCleaner(Heap* heap)
      : heap_(heap), pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if (o->IsHeapObject() &&
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        if (o->IsExternalString() ||
            (o->IsHeapObject() &&
             HeapObject::cast(o)->map()->has_external_resource())) {
          heap_->FinalizeExternalString(HeapObject::cast(*p));
        // Set the entry to the_hole_value (as deleted).
        *p = heap_->the_hole_value();
        pointers_removed_++;

  int PointersRemoved() {
    return pointers_removed_;

  int pointers_removed_;


// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {


void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
  ASSERT(IsMarked(object));
  ASSERT(HEAP->Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    heap_->ClearCacheOnMap(map);

    // When map collection is enabled we have to mark through map's transitions
    // in a special way to make transition links weak. Only maps for subclasses
    // of JSReceiver can have transitions.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
      marker_.MarkMapContents(map);
      marking_deque_.PushBlack(map);

    marking_deque_.PushBlack(object);


// Force instantiation of template instances.
template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
template <class T>
void Marker<T>::MarkMapContents(Map* map) {
  // Mark prototype transitions array but don't push it into marking stack.
  // This will make references from it weak. We will clean dead prototype
  // transitions in ClearNonLiveTransitions.
  Object** proto_trans_slot =
      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
  HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
  if (prototype_transitions->IsFixedArray()) {
    mark_compact_collector()->RecordSlot(proto_trans_slot,
                                         prototype_transitions);
    MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
      MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
                                            prototype_transitions->Size());

  // Make sure that the back pointer stored either in the map itself or inside
  // its prototype transitions array is marked. Treat pointers in the
  // descriptor array as weak and also mark that array to prevent visiting it
  // later.
  base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));

  Object** descriptor_array_slot =
      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
  Object* descriptor_array = *descriptor_array_slot;
  if (!descriptor_array->IsSmi()) {
    MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));

  // Mark the Object* fields of the Map. Since the descriptor array has been
  // marked already, it is fine that one of these fields contains a pointer
  // to it. But make sure to skip back pointer and prototype transitions.
  STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
                Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
  Object** start_slot = HeapObject::RawField(
      map, Map::kPointerFieldsBeginOffset);
  Object** end_slot = HeapObject::RawField(
      map, Map::kPrototypeTransitionsOrBackPointerOffset);
  for (Object** slot = start_slot; slot < end_slot; slot++) {
    Object* obj = *slot;
    if (!obj->NonFailureIsHeapObject()) continue;
    mark_compact_collector()->RecordSlot(start_slot, slot, obj);
    base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
1867 void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
1868 // Empty descriptor array is marked as a root before any maps are marked.
1869 ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
1871 // The DescriptorArray contains a pointer to its contents array, but the
1872 // contents array will be marked black and hence not be visited again.
1873 if (!base_marker()->MarkObjectAndPush(descriptors)) return;
1874 FixedArray* contents = FixedArray::cast(
1875 descriptors->get(DescriptorArray::kContentArrayIndex));
1876 ASSERT(contents->length() >= 2);
1877 ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
1878 base_marker()->MarkObjectWithoutPush(contents);
1880 // Contents contains (value, details) pairs. If the descriptor contains a
1881 // transition (value is a Map), we don't mark the value as live. It might
1882 // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
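// For illustration (layout inferred from the loop below, not an additional
// invariant): the contents array interleaves values and details, e.g.
//   contents = { value_0, details_0, value_1, details_1, ... }
// so slot i holds a descriptor's value and slot i + 1 holds its
// PropertyDetails encoded as a Smi.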
1883 for (int i = 0; i < contents->length(); i += 2) {
1884 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1886 Object** slot = contents->data_start() + i;
1887 if (!(*slot)->IsHeapObject()) continue;
1888 HeapObject* value = HeapObject::cast(*slot);
1890 mark_compact_collector()->RecordSlot(slot, slot, *slot);
1892 switch (details.type()) {
1895 case CONSTANT_FUNCTION:
1898 base_marker()->MarkObjectAndPush(value);
1901 if (!value->IsAccessorPair()) {
1902 base_marker()->MarkObjectAndPush(value);
1903 } else if (base_marker()->MarkObjectWithoutPush(value)) {
1904 AccessorPair* accessors = AccessorPair::cast(value);
1905 MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
1906 MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
1909 case ELEMENTS_TRANSITION:
1910 // For maps with multiple elements transitions, the transition maps are
1911 // stored in a FixedArray. Keep the fixed array alive but not the maps
1912 // that it refers to.
1913 if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
1915 case MAP_TRANSITION:
1916 case CONSTANT_TRANSITION:
1917 case NULL_DESCRIPTOR:
1925 void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
1926 Object** slot = HeapObject::RawField(accessors, offset);
1927 HeapObject* accessor = HeapObject::cast(*slot);
1928 if (accessor->IsMap()) return;
1929 mark_compact_collector()->RecordSlot(slot, slot, accessor);
1930 base_marker()->MarkObjectAndPush(accessor);
1934 // Fill the marking stack with overflowed objects returned by the given
1935 // iterator. Stop when the marking stack is filled or the end of the space
1936 // is reached, whichever comes first.
1938 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1939 MarkingDeque* marking_deque,
1941 // The caller should ensure that the marking stack is initially not full,
1942 // so that we don't waste effort pointlessly scanning for objects.
1943 ASSERT(!marking_deque->IsFull());
1945 Map* filler_map = heap->one_pointer_filler_map();
1946 for (HeapObject* object = it->Next();
1948 object = it->Next()) {
1949 MarkBit markbit = Marking::MarkBitFrom(object);
1950 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1951 Marking::GreyToBlack(markbit);
1952 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1953 marking_deque->PushBlack(object);
1954 if (marking_deque->IsFull()) return;
1960 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1963 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1964 ASSERT(!marking_deque->IsFull());
1965 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1966 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1967 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1968 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1970 MarkBit::CellType* cells = p->markbits()->cells();
1972 int last_cell_index =
1973 Bitmap::IndexToCell(
1974 Bitmap::CellAlignIndex(
1975 p->AddressToMarkbitIndex(p->area_end())));
1977 Address cell_base = p->area_start();
1978 int cell_index = Bitmap::IndexToCell(
1979 Bitmap::CellAlignIndex(
1980 p->AddressToMarkbitIndex(cell_base)));
1984 cell_index < last_cell_index;
1985 cell_index++, cell_base += 32 * kPointerSize) {
1986 ASSERT((unsigned)cell_index ==
1987 Bitmap::IndexToCell(
1988 Bitmap::CellAlignIndex(
1989 p->AddressToMarkbitIndex(cell_base))));
1991 const MarkBit::CellType current_cell = cells[cell_index];
1992 if (current_cell == 0) continue;
1994 const MarkBit::CellType next_cell = cells[cell_index + 1];
1995 MarkBit::CellType grey_objects = current_cell &
1996 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
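// Illustrative note on the extraction above: with the bit patterns asserted
// at the top of this function, an object is grey exactly when both its mark
// bit and the following bit are set, so ANDing a cell with its own
// right-shift (topped up with the low bit of the next cell) leaves a 1
// precisely at the start of each grey object. For example, with
// current_cell = 0b01011 (a grey object at bit 0, a black object at bit 3):
//   current_cell >> 1 = 0b00101, and 0b01011 & 0b00101 = 0b00001.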
1999 while (grey_objects != 0) {
2000 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2001 grey_objects >>= trailing_zeros;
2002 offset += trailing_zeros;
2003 MarkBit markbit(&cells[cell_index], 1 << offset, false);
2004 ASSERT(Marking::IsGrey(markbit));
2005 Marking::GreyToBlack(markbit);
2006 Address addr = cell_base + offset * kPointerSize;
2007 HeapObject* object = HeapObject::FromAddress(addr);
2008 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2009 marking_deque->PushBlack(object);
2010 if (marking_deque->IsFull()) return;
2015 grey_objects >>= (Bitmap::kBitsPerCell - 1);
2020 static void DiscoverGreyObjectsInSpace(Heap* heap,
2021 MarkingDeque* marking_deque,
2022 PagedSpace* space) {
2023 if (!space->was_swept_conservatively()) {
2024 HeapObjectIterator it(space);
2025 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2027 PageIterator it(space);
2028 while (it.has_next()) {
2029 Page* p = it.next();
2030 DiscoverGreyObjectsOnPage(marking_deque, p);
2031 if (marking_deque->IsFull()) return;
2037 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2039 if (!o->IsHeapObject()) return false;
2040 HeapObject* heap_object = HeapObject::cast(o);
2041 MarkBit mark = Marking::MarkBitFrom(heap_object);
2046 void MarkCompactCollector::MarkSymbolTable() {
2047 SymbolTable* symbol_table = heap()->symbol_table();
2048 // Mark the symbol table itself.
2049 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
2050 SetMark(symbol_table, symbol_table_mark);
2051 // Explicitly mark the prefix.
2052 MarkingVisitor marker(heap());
2053 symbol_table->IteratePrefix(&marker);
2054 ProcessMarkingDeque();
2058 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2059 // Mark the heap roots including global variables, stack variables,
2060 // etc., and all objects reachable from them.
2061 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2063 // Handle the symbol table specially.
2066 // There may be overflowed objects in the heap. Visit them now.
2067 while (marking_deque_.overflowed()) {
2068 RefillMarkingDeque();
2069 EmptyMarkingDeque();
2074 void MarkCompactCollector::MarkObjectGroups() {
2075 List<ObjectGroup*>* object_groups =
2076 heap()->isolate()->global_handles()->object_groups();
2079 for (int i = 0; i < object_groups->length(); i++) {
2080 ObjectGroup* entry = object_groups->at(i);
2081 ASSERT(entry != NULL);
2083 Object*** objects = entry->objects_;
2084 bool group_marked = false;
2085 for (size_t j = 0; j < entry->length_; j++) {
2086 Object* object = *objects[j];
2087 if (object->IsHeapObject()) {
2088 HeapObject* heap_object = HeapObject::cast(object);
2089 MarkBit mark = Marking::MarkBitFrom(heap_object);
2091 group_marked = true;
2097 if (!group_marked) {
2098 (*object_groups)[last++] = entry;
2102 // An object in the group is marked, so mark as grey all white heap
2103 // objects in the group.
2104 for (size_t j = 0; j < entry->length_; ++j) {
2105 Object* object = *objects[j];
2106 if (object->IsHeapObject()) {
2107 HeapObject* heap_object = HeapObject::cast(object);
2108 MarkBit mark = Marking::MarkBitFrom(heap_object);
2109 MarkObject(heap_object, mark);
2113 // Once the entire group has been colored grey, set the object group
2114 // to NULL so it won't be processed again.
2116 object_groups->at(i) = NULL;
2118 object_groups->Rewind(last);
2122 void MarkCompactCollector::MarkImplicitRefGroups() {
2123 List<ImplicitRefGroup*>* ref_groups =
2124 heap()->isolate()->global_handles()->implicit_ref_groups();
2127 for (int i = 0; i < ref_groups->length(); i++) {
2128 ImplicitRefGroup* entry = ref_groups->at(i);
2129 ASSERT(entry != NULL);
2131 if (!IsMarked(*entry->parent_)) {
2132 (*ref_groups)[last++] = entry;
2136 Object*** children = entry->children_;
2137 // A parent object is marked, so mark all child heap objects.
2138 for (size_t j = 0; j < entry->length_; ++j) {
2139 if ((*children[j])->IsHeapObject()) {
2140 HeapObject* child = HeapObject::cast(*children[j]);
2141 MarkBit mark = Marking::MarkBitFrom(child);
2142 MarkObject(child, mark);
2146 // Once the entire group has been marked, dispose it because it's
2147 // not needed anymore.
2150 ref_groups->Rewind(last);
2154 // Mark all objects reachable from the objects on the marking stack.
2155 // Before: the marking stack contains zero or more heap object pointers.
2156 // After: the marking stack is empty, and all objects reachable from the
2157 // marking stack have been marked, or are overflowed in the heap.
2158 void MarkCompactCollector::EmptyMarkingDeque() {
2159 while (!marking_deque_.IsEmpty()) {
2160 while (!marking_deque_.IsEmpty()) {
2161 HeapObject* object = marking_deque_.Pop();
2162 ASSERT(object->IsHeapObject());
2163 ASSERT(heap()->Contains(object));
2164 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2166 Map* map = object->map();
2167 MarkBit map_mark = Marking::MarkBitFrom(map);
2168 MarkObject(map, map_mark);
2170 StaticMarkingVisitor::IterateBody(map, object);
2173 // Process encountered weak maps, mark objects only reachable by those
2174 // weak maps and repeat until fix-point is reached.
2180 // Sweep the heap for overflowed objects, clear their overflow bits, and
2181 // push them on the marking stack. Stop early if the marking stack fills
2182 // before sweeping completes. If sweeping completes, there are no remaining
2183 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
2185 void MarkCompactCollector::RefillMarkingDeque() {
2186 ASSERT(marking_deque_.overflowed());
2188 SemiSpaceIterator new_it(heap()->new_space());
2189 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
2190 if (marking_deque_.IsFull()) return;
2192 DiscoverGreyObjectsInSpace(heap(),
2194 heap()->old_pointer_space());
2195 if (marking_deque_.IsFull()) return;
2197 DiscoverGreyObjectsInSpace(heap(),
2199 heap()->old_data_space());
2200 if (marking_deque_.IsFull()) return;
2202 DiscoverGreyObjectsInSpace(heap(),
2204 heap()->code_space());
2205 if (marking_deque_.IsFull()) return;
2207 DiscoverGreyObjectsInSpace(heap(),
2209 heap()->map_space());
2210 if (marking_deque_.IsFull()) return;
2212 DiscoverGreyObjectsInSpace(heap(),
2214 heap()->cell_space());
2215 if (marking_deque_.IsFull()) return;
2217 LargeObjectIterator lo_it(heap()->lo_space());
2218 DiscoverGreyObjectsWithIterator(heap(),
2221 if (marking_deque_.IsFull()) return;
2223 marking_deque_.ClearOverflowed();
2227 // Mark all objects reachable (transitively) from objects on the marking
2228 // stack. Before: the marking stack contains zero or more heap object
2229 // pointers. After: the marking stack is empty and there are no overflowed
2230 // objects in the heap.
2231 void MarkCompactCollector::ProcessMarkingDeque() {
2232 EmptyMarkingDeque();
2233 while (marking_deque_.overflowed()) {
2234 RefillMarkingDeque();
2235 EmptyMarkingDeque();
2240 void MarkCompactCollector::ProcessExternalMarking() {
2241 bool work_to_do = true;
2242 ASSERT(marking_deque_.IsEmpty());
2243 while (work_to_do) {
2245 MarkImplicitRefGroups();
2246 work_to_do = !marking_deque_.IsEmpty();
2247 ProcessMarkingDeque();
2252 void MarkCompactCollector::MarkLiveObjects() {
2253 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2254 // The recursive GC marker detects when it is nearing stack overflow,
2255 // and switches to a different marking system. JS interrupts interfere
2256 // with the C stack limit check.
2257 PostponeInterruptsScope postpone(heap()->isolate());
2259 bool incremental_marking_overflowed = false;
2260 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2261 if (was_marked_incrementally_) {
2262 // Finalize the incremental marking and check whether we had an overflow.
2263 // Both markers use the grey color to mark overflowed objects, so the
2264 // non-incremental marker can deal with them as if the overflow had
2265 // occurred during normal marking.
2266 // But the incremental marker uses a separate marking deque,
2267 // so we have to explicitly copy its overflow state.
2268 incremental_marking->Finalize();
2269 incremental_marking_overflowed =
2270 incremental_marking->marking_deque()->overflowed();
2271 incremental_marking->marking_deque()->ClearOverflowed();
2273 // Abort any pending incremental activities e.g. incremental sweeping.
2274 incremental_marking->Abort();
2278 ASSERT(state_ == PREPARE_GC);
2279 state_ = MARK_LIVE_OBJECTS;
2281 // The to space contains live objects, a page in from space is used as a marking stack.
2283 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2284 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2285 if (FLAG_force_marking_deque_overflows) {
2286 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2288 marking_deque_.Initialize(marking_deque_start,
2290 ASSERT(!marking_deque_.overflowed());
2292 if (incremental_marking_overflowed) {
2293 // There are overflowed objects left in the heap after incremental marking.
2294 marking_deque_.SetOverflowed();
2297 PrepareForCodeFlushing();
2299 if (was_marked_incrementally_) {
2300 // There is no write barrier on cells so we have to scan them now at the end
2301 // of the incremental marking.
2303 HeapObjectIterator cell_iterator(heap()->cell_space());
2305 while ((cell = cell_iterator.Next()) != NULL) {
2306 ASSERT(cell->IsJSGlobalPropertyCell());
2307 if (IsMarked(cell)) {
2308 int offset = JSGlobalPropertyCell::kValueOffset;
2309 StaticMarkingVisitor::VisitPointer(
2311 reinterpret_cast<Object**>(cell->address() + offset));
2317 RootMarkingVisitor root_visitor(heap());
2318 MarkRoots(&root_visitor);
2320 // The objects reachable from the roots are marked, yet unreachable
2321 // objects are unmarked. Mark objects reachable due to host
2322 // application specific logic.
2323 ProcessExternalMarking();
2325 // The objects reachable from the roots or object groups are marked,
2326 // yet unreachable objects are unmarked. Mark objects reachable
2327 // only from weak global handles.
2329 // First we identify nonlive weak handles and mark them as pending destruction.
2331 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2332 &IsUnmarkedHeapObject);
2333 // Then we mark the objects and process the transitive closure.
2334 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2335 while (marking_deque_.overflowed()) {
2336 RefillMarkingDeque();
2337 EmptyMarkingDeque();
2340 // Repeat host application specific marking to mark unmarked objects
2341 // reachable from the weak roots.
2342 ProcessExternalMarking();
2348 void MarkCompactCollector::AfterMarking() {
2349 // Object literal map caches reference symbols (cache keys) and maps
2350 // (cache values). At this point, still-useful maps have already been
2351 // marked. Mark the keys for the alive values before we process the symbol table.
2355 // Prune the symbol table removing all symbols only pointed to by the
2356 // symbol table. Cannot use symbol_table() here because the symbol table is marked.
2358 SymbolTable* symbol_table = heap()->symbol_table();
2359 SymbolTableCleaner v(heap());
2360 symbol_table->IterateElements(&v);
2361 symbol_table->ElementsRemoved(v.PointersRemoved());
2362 heap()->external_string_table_.Iterate(&v);
2363 heap()->external_string_table_.CleanUp();
2365 // Process the weak references.
2366 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2367 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2369 // Remove object groups after marking phase.
2370 heap()->isolate()->global_handles()->RemoveObjectGroups();
2371 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2373 // Flush code from collected candidates.
2374 if (is_code_flushing_enabled()) {
2375 code_flusher_->ProcessCandidates();
2378 if (!FLAG_watch_ic_patching) {
2379 // Clean up dead objects from the runtime profiler.
2380 heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2385 void MarkCompactCollector::ProcessMapCaches() {
2386 Object* raw_context = heap()->global_contexts_list_;
2387 while (raw_context != heap()->undefined_value()) {
2388 Context* context = reinterpret_cast<Context*>(raw_context);
2389 if (IsMarked(context)) {
2390 HeapObject* raw_map_cache =
2391 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2392 // A map cache may be reachable from the stack. In this case
2393 // it's already transitively marked and it's too late to clean it up.
2395 if (!IsMarked(raw_map_cache) &&
2396 raw_map_cache != heap()->undefined_value()) {
2397 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2398 int existing_elements = map_cache->NumberOfElements();
2399 int used_elements = 0;
2400 for (int i = MapCache::kElementsStartIndex;
2401 i < map_cache->length();
2402 i += MapCache::kEntrySize) {
2403 Object* raw_key = map_cache->get(i);
2404 if (raw_key == heap()->undefined_value() ||
2405 raw_key == heap()->the_hole_value()) continue;
2406 STATIC_ASSERT(MapCache::kEntrySize == 2);
2407 Object* raw_map = map_cache->get(i + 1);
2408 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2411 // Delete useless entries with unmarked maps.
2412 ASSERT(raw_map->IsMap());
2413 map_cache->set_the_hole(i);
2414 map_cache->set_the_hole(i + 1);
2417 if (used_elements == 0) {
2418 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2420 // Note: we don't actually shrink the cache here to avoid
2421 // extra complexity during GC. We rely on subsequent cache
2422 // usages (EnsureCapacity) to do this.
2423 map_cache->ElementsRemoved(existing_elements - used_elements);
2424 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2425 MarkObject(map_cache, map_cache_markbit);
2429 // Move to next element in the list.
2430 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2432 ProcessMarkingDeque();
2436 void MarkCompactCollector::ReattachInitialMaps() {
2437 HeapObjectIterator map_iterator(heap()->map_space());
2438 for (HeapObject* obj = map_iterator.Next();
2440 obj = map_iterator.Next()) {
2441 if (obj->IsFreeSpace()) continue;
2442 Map* map = Map::cast(obj);
2444 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2445 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2447 if (map->attached_to_shared_function_info()) {
2448 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2454 void MarkCompactCollector::ClearNonLiveTransitions() {
2455 HeapObjectIterator map_iterator(heap()->map_space());
2456 // Iterate over the map space, setting map transitions that go from
2457 // a marked map to an unmarked map to null transitions. This action
2458 // is carried out only on maps of JSObjects and related subtypes.
2459 for (HeapObject* obj = map_iterator.Next();
2460 obj != NULL; obj = map_iterator.Next()) {
2461 Map* map = reinterpret_cast<Map*>(obj);
2462 MarkBit map_mark = Marking::MarkBitFrom(map);
2463 if (map->IsFreeSpace()) continue;
2465 ASSERT(map->IsMap());
2466 // Only JSObject and subtypes have map transitions and back pointers.
2467 STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
2468 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
2470 if (map_mark.Get() &&
2471 map->attached_to_shared_function_info()) {
2472 // This map is used for inobject slack tracking and has been detached
2473 // from SharedFunctionInfo during the mark phase.
2474 // Since it survived the GC, reattach it now.
2475 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
2478 ClearNonLivePrototypeTransitions(map);
2479 ClearNonLiveMapTransitions(map, map_mark);
2484 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2485 int number_of_transitions = map->NumberOfProtoTransitions();
2486 FixedArray* prototype_transitions = map->prototype_transitions();
2488 int new_number_of_transitions = 0;
2489 const int header = Map::kProtoTransitionHeaderSize;
2490 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2491 const int map_offset = header + Map::kProtoTransitionMapOffset;
2492 const int step = Map::kProtoTransitionElementsPerEntry;
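// The loop below compacts the array in place: only entries whose prototype
// and cached map are both still marked are kept, and survivors are copied
// towards the front so the remaining new_number_of_transitions entries stay
// contiguous.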
2493 for (int i = 0; i < number_of_transitions; i++) {
2494 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2495 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2496 if (IsMarked(prototype) && IsMarked(cached_map)) {
2497 int proto_index = proto_offset + new_number_of_transitions * step;
2498 int map_index = map_offset + new_number_of_transitions * step;
2499 if (new_number_of_transitions != i) {
2500 prototype_transitions->set_unchecked(
2504 UPDATE_WRITE_BARRIER);
2505 prototype_transitions->set_unchecked(
2509 SKIP_WRITE_BARRIER);
2512 HeapObject::RawField(prototype_transitions,
2513 FixedArray::OffsetOfElementAt(proto_index));
2514 RecordSlot(slot, slot, prototype);
2515 new_number_of_transitions++;
2519 if (new_number_of_transitions != number_of_transitions) {
2520 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2523 // Fill slots that became free with undefined value.
2524 for (int i = new_number_of_transitions * step;
2525 i < number_of_transitions * step;
2527 prototype_transitions->set_undefined(heap_, header + i);
2532 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2534 Object* potential_parent = map->GetBackPointer();
2535 if (!potential_parent->IsMap()) return;
2536 Map* parent = Map::cast(potential_parent);
2538 // Follow the back pointer, check whether we are dealing with a map transition
2539 // from a live map to a dead path and, if so, clear the parent's transitions.
2540 bool current_is_alive = map_mark.Get();
2541 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2542 if (!current_is_alive && parent_is_alive) {
2543 parent->ClearNonLiveTransitions(heap());
2548 void MarkCompactCollector::ProcessWeakMaps() {
2549 Object* weak_map_obj = encountered_weak_maps();
2550 while (weak_map_obj != Smi::FromInt(0)) {
2551 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2552 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2553 ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2554 Object** anchor = reinterpret_cast<Object**>(table->address());
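// For every entry whose key is still reachable, keep the corresponding value
// alive as well and record the slots for pointer updating. Entries with
// unreachable keys are left alone here; they are removed later in
// ClearWeakMaps.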
2555 for (int i = 0; i < table->Capacity(); i++) {
2556 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2558 HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2559 ObjectHashTable::EntryToIndex(i)));
2560 RecordSlot(anchor, key_slot, *key_slot);
2561 Object** value_slot =
2562 HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2563 ObjectHashTable::EntryToValueIndex(i)));
2564 StaticMarkingVisitor::MarkObjectByPointer(this, anchor, value_slot);
2567 weak_map_obj = weak_map->next();
2572 void MarkCompactCollector::ClearWeakMaps() {
2573 Object* weak_map_obj = encountered_weak_maps();
2574 while (weak_map_obj != Smi::FromInt(0)) {
2575 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2576 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2577 ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2578 for (int i = 0; i < table->Capacity(); i++) {
2579 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2580 table->RemoveEntry(i);
2583 weak_map_obj = weak_map->next();
2584 weak_map->set_next(Smi::FromInt(0));
2586 set_encountered_weak_maps(Smi::FromInt(0));
2590 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2593 // The first pass migrates all alive objects from one semispace to another or
2594 // promotes them to old space. The forwarding address is written directly into
2595 // the first word of the object without any encoding. If the object is dead we
2596 // write NULL as its forwarding address.
2598 // The second pass updates pointers to new space in all spaces. It is possible
2599 // to encounter pointers to dead new space objects during traversal of pointers
2600 // to new space. We should clear them to avoid encountering them during the next
2601 // pointer iteration. This is an issue if the store buffer overflows and we
2602 // have to scan the entire old space, including dead objects, looking for
2603 // pointers to new space.
2604 void MarkCompactCollector::MigrateObject(Address dst,
2607 AllocationSpace dest) {
2608 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2609 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2610 Address src_slot = src;
2611 Address dst_slot = dst;
2612 ASSERT(IsAligned(size, kPointerSize));
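// Copy the object word by word so that, for each copied slot, we can either
// mark the destination slot in the store buffer (pointee in new space) or
// record it in the migration slots buffer (pointee on an evacuation
// candidate).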
2614 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2615 Object* value = Memory::Object_at(src_slot);
2617 Memory::Object_at(dst_slot) = value;
2619 if (heap_->InNewSpace(value)) {
2620 heap_->store_buffer()->Mark(dst_slot);
2621 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2622 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2623 &migration_slots_buffer_,
2624 reinterpret_cast<Object**>(dst_slot),
2625 SlotsBuffer::IGNORE_OVERFLOW);
2628 src_slot += kPointerSize;
2629 dst_slot += kPointerSize;
2632 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2633 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2634 Address code_entry = Memory::Address_at(code_entry_slot);
2636 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2637 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2638 &migration_slots_buffer_,
2639 SlotsBuffer::CODE_ENTRY_SLOT,
2641 SlotsBuffer::IGNORE_OVERFLOW);
2644 } else if (dest == CODE_SPACE) {
2645 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2646 heap()->MoveBlock(dst, src, size);
2647 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2648 &migration_slots_buffer_,
2649 SlotsBuffer::RELOCATED_CODE_OBJECT,
2651 SlotsBuffer::IGNORE_OVERFLOW);
2652 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2654 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2655 heap()->MoveBlock(dst, src, size);
2657 Memory::Address_at(src) = dst;
2661 // Visitor for updating pointers from live objects in old spaces to new space.
2662 // It does not expect to encounter pointers to dead objects.
2663 class PointersUpdatingVisitor: public ObjectVisitor {
2665 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2667 void VisitPointer(Object** p) {
2671 void VisitPointers(Object** start, Object** end) {
2672 for (Object** p = start; p < end; p++) UpdatePointer(p);
2675 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2676 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2677 Object* target = rinfo->target_object();
2678 VisitPointer(&target);
2679 rinfo->set_target_object(target);
2682 void VisitCodeTarget(RelocInfo* rinfo) {
2683 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2684 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2685 VisitPointer(&target);
2686 rinfo->set_target_address(Code::cast(target)->instruction_start());
2689 void VisitDebugTarget(RelocInfo* rinfo) {
2690 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2691 rinfo->IsPatchedReturnSequence()) ||
2692 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2693 rinfo->IsPatchedDebugBreakSlotSequence()));
2694 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2695 VisitPointer(&target);
2696 rinfo->set_call_address(Code::cast(target)->instruction_start());
2699 static inline void UpdateSlot(Heap* heap, Object** slot) {
2700 Object* obj = *slot;
2702 if (!obj->IsHeapObject()) return;
2704 HeapObject* heap_obj = HeapObject::cast(obj);
2706 MapWord map_word = heap_obj->map_word();
2707 if (map_word.IsForwardingAddress()) {
2708 ASSERT(heap->InFromSpace(heap_obj) ||
2709 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2710 HeapObject* target = map_word.ToForwardingAddress();
2712 ASSERT(!heap->InFromSpace(target) &&
2713 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2718 inline void UpdatePointer(Object** p) {
2719 UpdateSlot(heap_, p);
2726 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2727 ASSERT(*p == object);
2729 Address old_addr = object->address();
2731 Address new_addr = Memory::Address_at(old_addr);
2733 // The new space sweep will overwrite the map word of dead objects
2734 // with NULL. In this case we do not need to transfer this entry to
2735 // the store buffer which we are rebuilding.
2736 if (new_addr != NULL) {
2737 *p = HeapObject::FromAddress(new_addr);
2739 // We have to zap this pointer, because the store buffer may overflow later,
2740 // and then we have to scan the entire heap and we don't want to find
2741 // spurious new space pointers in the old space.
2742 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
2747 static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2749 MapWord map_word = HeapObject::cast(*p)->map_word();
2751 if (map_word.IsForwardingAddress()) {
2752 return HeapObject::cast(map_word.ToForwardingAddress());
2755 return HeapObject::cast(*p);
2759 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2763 if (object_size > Page::kMaxNonCodeHeapObjectSize) {
2764 MaybeObject* maybe_result =
2765 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2766 if (maybe_result->ToObject(&result)) {
2767 HeapObject* target = HeapObject::cast(result);
2768 MigrateObject(target->address(),
2772 heap()->mark_compact_collector()->tracer()->
2773 increment_promoted_objects_size(object_size);
2777 OldSpace* target_space = heap()->TargetSpace(object);
2779 ASSERT(target_space == heap()->old_pointer_space() ||
2780 target_space == heap()->old_data_space());
2781 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2782 if (maybe_result->ToObject(&result)) {
2783 HeapObject* target = HeapObject::cast(result);
2784 MigrateObject(target->address(),
2787 target_space->identity());
2788 heap()->mark_compact_collector()->tracer()->
2789 increment_promoted_objects_size(object_size);
2798 void MarkCompactCollector::EvacuateNewSpace() {
2799 // There are soft limits in the allocation code, designed to trigger a
2800 // mark-sweep collection by failing allocations. But since we are already in
2801 // a mark-sweep allocation, there is no sense in trying to trigger one.
2802 AlwaysAllocateScope scope;
2803 heap()->CheckNewSpaceExpansionCriteria();
2805 NewSpace* new_space = heap()->new_space();
2807 // Store allocation range before flipping semispaces.
2808 Address from_bottom = new_space->bottom();
2809 Address from_top = new_space->top();
2811 // Flip the semispaces. After flipping, to space is empty and from space has the live objects.
2814 new_space->ResetAllocationInfo();
2816 int survivors_size = 0;
2818 // First pass: traverse all objects in inactive semispace, remove marks,
2819 // migrate live objects and write forwarding addresses. This stage puts
2820 // new entries in the store buffer and may cause some pages to be marked
2821 // scan-on-scavenge.
2822 SemiSpaceIterator from_it(from_bottom, from_top);
2823 for (HeapObject* object = from_it.Next();
2825 object = from_it.Next()) {
2826 MarkBit mark_bit = Marking::MarkBitFrom(object);
2827 if (mark_bit.Get()) {
2829 // Don't bother decrementing live bytes count. We'll discard the
2830 // entire page at the end.
2831 int size = object->Size();
2832 survivors_size += size;
2834 // Aggressively promote young survivors to the old space.
2835 if (TryPromoteObject(object, size)) {
2839 // Promotion failed. Just migrate object to another semispace.
2840 MaybeObject* allocation = new_space->AllocateRaw(size);
2841 if (allocation->IsFailure()) {
2842 if (!new_space->AddFreshPage()) {
2843 // Shouldn't happen. We are sweeping linearly, and to-space
2844 // has the same number of pages as from-space, so there is always room.
2848 allocation = new_space->AllocateRaw(size);
2849 ASSERT(!allocation->IsFailure());
2851 Object* target = allocation->ToObjectUnchecked();
2853 MigrateObject(HeapObject::cast(target)->address(),
2858 // Process the dead object before we write a NULL into its header.
2859 LiveObjectList::ProcessNonLive(object);
2861 // Mark dead objects in the new space with null in their map field.
2862 Memory::Address_at(object->address()) = NULL;
2866 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2867 new_space->set_age_mark(new_space->top());
2871 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2872 AlwaysAllocateScope always_allocate;
2873 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2874 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2875 MarkBit::CellType* cells = p->markbits()->cells();
2876 p->MarkSweptPrecisely();
2878 int last_cell_index =
2879 Bitmap::IndexToCell(
2880 Bitmap::CellAlignIndex(
2881 p->AddressToMarkbitIndex(p->area_end())));
2883 Address cell_base = p->area_start();
2884 int cell_index = Bitmap::IndexToCell(
2885 Bitmap::CellAlignIndex(
2886 p->AddressToMarkbitIndex(cell_base)));
2891 cell_index < last_cell_index;
2892 cell_index++, cell_base += 32 * kPointerSize) {
2893 ASSERT((unsigned)cell_index ==
2894 Bitmap::IndexToCell(
2895 Bitmap::CellAlignIndex(
2896 p->AddressToMarkbitIndex(cell_base))));
2897 if (cells[cell_index] == 0) continue;
2899 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2900 for (int i = 0; i < live_objects; i++) {
2901 Address object_addr = cell_base + offsets[i] * kPointerSize;
2902 HeapObject* object = HeapObject::FromAddress(object_addr);
2903 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2905 int size = object->Size();
2907 MaybeObject* target = space->AllocateRaw(size);
2908 if (target->IsFailure()) {
2909 // OS refused to give us memory.
2910 V8::FatalProcessOutOfMemory("Evacuation");
2914 Object* target_object = target->ToObjectUnchecked();
2916 MigrateObject(HeapObject::cast(target_object)->address(),
2920 ASSERT(object->map_word().IsForwardingAddress());
2923 // Clear marking bits for current cell.
2924 cells[cell_index] = 0;
2926 p->ResetLiveBytes();
2930 void MarkCompactCollector::EvacuatePages() {
2931 int npages = evacuation_candidates_.length();
2932 for (int i = 0; i < npages; i++) {
2933 Page* p = evacuation_candidates_[i];
2934 ASSERT(p->IsEvacuationCandidate() ||
2935 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2936 if (p->IsEvacuationCandidate()) {
2937 // During compaction we might have to request a new page.
2938 // Check that the space still has room for that.
2939 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2940 EvacuateLiveObjectsFromPage(p);
2942 // Without room for expansion evacuation is not guaranteed to succeed.
2943 // Pessimistically abandon unevacuated pages.
2944 for (int j = i; j < npages; j++) {
2945 Page* page = evacuation_candidates_[j];
2946 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2947 page->ClearEvacuationCandidate();
2948 page->SetFlag(Page::RESCAN_ON_EVACUATION);
2957 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2959 virtual Object* RetainAs(Object* object) {
2960 if (object->IsHeapObject()) {
2961 HeapObject* heap_object = HeapObject::cast(object);
2962 MapWord map_word = heap_object->map_word();
2963 if (map_word.IsForwardingAddress()) {
2964 return map_word.ToForwardingAddress();
2972 static inline void UpdateSlot(ObjectVisitor* v,
2973 SlotsBuffer::SlotType slot_type,
2975 switch (slot_type) {
2976 case SlotsBuffer::CODE_TARGET_SLOT: {
2977 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
2981 case SlotsBuffer::CODE_ENTRY_SLOT: {
2982 v->VisitCodeEntry(addr);
2985 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2986 HeapObject* obj = HeapObject::FromAddress(addr);
2987 Code::cast(obj)->CodeIterateBody(v);
2990 case SlotsBuffer::DEBUG_TARGET_SLOT: {
2991 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
2992 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
2995 case SlotsBuffer::JS_RETURN_SLOT: {
2996 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
2997 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
3000 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3001 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3014 SWEEP_AND_VISIT_LIVE_OBJECTS
3018 enum SkipListRebuildingMode {
3024 // Sweep a space precisely. After this has been done the space can
3025 // be iterated precisely, hitting only the live objects. Code space
3026 // is always swept precisely because we want to be able to iterate
3027 // over it. Map space is swept precisely, because it is not compacted.
3028 // Slots in live objects pointing into evacuation candidates are updated if requested.
3030 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3031 static void SweepPrecisely(PagedSpace* space,
3034 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3035 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3036 space->identity() == CODE_SPACE);
3037 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3039 MarkBit::CellType* cells = p->markbits()->cells();
3040 p->MarkSweptPrecisely();
3042 int last_cell_index =
3043 Bitmap::IndexToCell(
3044 Bitmap::CellAlignIndex(
3045 p->AddressToMarkbitIndex(p->area_end())));
3047 Address free_start = p->area_start();
3049 Bitmap::IndexToCell(
3050 Bitmap::CellAlignIndex(
3051 p->AddressToMarkbitIndex(free_start)));
3053 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3054 Address object_address = free_start;
3057 SkipList* skip_list = p->skip_list();
3058 int curr_region = -1;
3059 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3064 cell_index < last_cell_index;
3065 cell_index++, object_address += 32 * kPointerSize) {
3066 ASSERT((unsigned)cell_index ==
3067 Bitmap::IndexToCell(
3068 Bitmap::CellAlignIndex(
3069 p->AddressToMarkbitIndex(object_address))));
3070 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
3072 for ( ; live_objects != 0; live_objects--) {
3073 Address free_end = object_address + offsets[live_index++] * kPointerSize;
3074 if (free_end != free_start) {
3075 space->Free(free_start, static_cast<int>(free_end - free_start));
3077 HeapObject* live_object = HeapObject::FromAddress(free_end);
3078 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3079 Map* map = live_object->map();
3080 int size = live_object->SizeFromMap(map);
3081 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3082 live_object->IterateBody(map->instance_type(), size, v);
3084 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3085 int new_region_start =
3086 SkipList::RegionNumber(free_end);
3087 int new_region_end =
3088 SkipList::RegionNumber(free_end + size - kPointerSize);
3089 if (new_region_start != curr_region ||
3090 new_region_end != curr_region) {
3091 skip_list->AddObject(free_end, size);
3092 curr_region = new_region_end;
3095 free_start = free_end + size;
3097 // Clear marking bits for current cell.
3098 cells[cell_index] = 0;
3100 if (free_start != p->area_end()) {
3101 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3103 p->ResetLiveBytes();
3107 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3108 Page* p = Page::FromAddress(code->address());
3110 if (p->IsEvacuationCandidate() ||
3111 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3115 Address code_start = code->address();
3116 Address code_end = code_start + code->Size();
3118 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3119 uint32_t end_index =
3120 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3122 Bitmap* b = p->markbits();
3124 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3125 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3127 MarkBit::CellType* start_cell = start_mark_bit.cell();
3128 MarkBit::CellType* end_cell = end_mark_bit.cell();
3131 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3132 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
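// Illustrative example of the masks above: if the start bit's mask is
// 0b...01000, then start_mask = ~0b...00111 selects that bit and everything
// above it; if the end bit's mask is 0b...00100, then end_mask = 0b...00111
// selects that bit and everything below it. Intersecting them (same cell), or
// applying them to the first and last cells, covers exactly [start, end].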
3134 if (start_cell == end_cell) {
3135 *start_cell |= start_mask & end_mask;
3137 *start_cell |= start_mask;
3138 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3141 *end_cell |= end_mask;
3144 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3153 static bool IsOnInvalidatedCodeObject(Address addr) {
3154 // We did not record any slots in large objects thus
3155 // we can safely go to the page from the slot address.
3156 Page* p = Page::FromAddress(addr);
3158 // First check owner's identity because old pointer and old data spaces
3159 // are swept lazily and might still have non-zero mark-bits on some pages.
3161 if (p->owner()->identity() != CODE_SPACE) return false;
3163 // In code space only bits on evacuation candidates (but we don't record
3164 // any slots on them) and under invalidated code objects are non-zero.
3166 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3168 return mark_bit.Get();
3172 void MarkCompactCollector::InvalidateCode(Code* code) {
3173 if (heap_->incremental_marking()->IsCompacting() &&
3174 !ShouldSkipEvacuationSlotRecording(code)) {
3175 ASSERT(compacting_);
3177 // If the object is white, then no slots were recorded on it yet.
3178 MarkBit mark_bit = Marking::MarkBitFrom(code);
3179 if (Marking::IsWhite(mark_bit)) return;
3181 invalidated_code_.Add(code);
3186 bool MarkCompactCollector::MarkInvalidatedCode() {
3187 bool code_marked = false;
3189 int length = invalidated_code_.length();
3190 for (int i = 0; i < length; i++) {
3191 Code* code = invalidated_code_[i];
3193 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3202 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3203 int length = invalidated_code_.length();
3204 for (int i = 0; i < length; i++) {
3205 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3210 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3211 int length = invalidated_code_.length();
3212 for (int i = 0; i < length; i++) {
3213 Code* code = invalidated_code_[i];
3215 code->Iterate(visitor);
3216 SetMarkBitsUnderInvalidatedCode(code, false);
3219 invalidated_code_.Rewind(0);
3223 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3224 bool code_slots_filtering_required;
3225 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3226 code_slots_filtering_required = MarkInvalidatedCode();
3232 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3236 // Second pass: find pointers to new space and update them.
3237 PointersUpdatingVisitor updating_visitor(heap());
3239 { GCTracer::Scope gc_scope(tracer_,
3240 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3241 // Update pointers in to space.
3242 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3243 heap()->new_space()->top());
3244 for (HeapObject* object = to_it.Next();
3246 object = to_it.Next()) {
3247 Map* map = object->map();
3248 object->IterateBody(map->instance_type(),
3249 object->SizeFromMap(map),
3254 { GCTracer::Scope gc_scope(tracer_,
3255 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3257 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3258 LiveObjectList::IterateElements(&updating_visitor);
3261 { GCTracer::Scope gc_scope(tracer_,
3262 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3263 StoreBufferRebuildScope scope(heap_,
3264 heap_->store_buffer(),
3265 &Heap::ScavengeStoreBufferCallback);
3266 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3269 { GCTracer::Scope gc_scope(tracer_,
3270 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3271 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3272 migration_slots_buffer_,
3273 code_slots_filtering_required);
3274 if (FLAG_trace_fragmentation) {
3275 PrintF(" migration slots buffer: %d\n",
3276 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3279 if (compacting_ && was_marked_incrementally_) {
3280 // It's difficult to filter out slots recorded for large objects.
3281 LargeObjectIterator it(heap_->lo_space());
3282 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3283 // LargeObjectSpace is not swept yet thus we have to skip
3284 // dead objects explicitly.
3285 if (!IsMarked(obj)) continue;
3287 Page* p = Page::FromAddress(obj->address());
3288 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3289 obj->Iterate(&updating_visitor);
3290 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3296 int npages = evacuation_candidates_.length();
3297 { GCTracer::Scope gc_scope(
3298 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3299 for (int i = 0; i < npages; i++) {
3300 Page* p = evacuation_candidates_[i];
3301 ASSERT(p->IsEvacuationCandidate() ||
3302 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3304 if (p->IsEvacuationCandidate()) {
3305 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3307 code_slots_filtering_required);
3308 if (FLAG_trace_fragmentation) {
3309 PrintF(" page %p slots buffer: %d\n",
3310 reinterpret_cast<void*>(p),
3311 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3314 // Important: the skip list should be cleared only after the roots have been
3315 // updated, because root iteration traverses the stack and might have to find
3316 // code objects from a non-updated pc pointing into an evacuation candidate.
3317 SkipList* list = p->skip_list();
3318 if (list != NULL) list->Clear();
3320 if (FLAG_gc_verbose) {
3321 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3322 reinterpret_cast<intptr_t>(p));
3324 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3325 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3327 switch (space->identity()) {
3328 case OLD_DATA_SPACE:
3329 SweepConservatively(space, p);
3331 case OLD_POINTER_SPACE:
3332 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3333 space, p, &updating_visitor);
3336 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3337 space, p, &updating_visitor);
3347 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3349 // Update pointers from cells.
3350 HeapObjectIterator cell_iterator(heap_->cell_space());
3351 for (HeapObject* cell = cell_iterator.Next();
3353 cell = cell_iterator.Next()) {
3354 if (cell->IsJSGlobalPropertyCell()) {
3355 Address value_address =
3356 reinterpret_cast<Address>(cell) +
3357 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3358 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
3362 // Update pointer from the global contexts list.
3363 updating_visitor.VisitPointer(heap_->global_contexts_list_address());
3365 heap_->symbol_table()->Iterate(&updating_visitor);
3367 // Update pointers from external string table.
3368 heap_->UpdateReferencesInExternalStringTable(
3369 &UpdateReferenceInExternalStringTableEntry);
3371 if (!FLAG_watch_ic_patching) {
3372 // Update JSFunction pointers from the runtime profiler.
3373 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3377 EvacuationWeakObjectRetainer evacuation_object_retainer;
3378 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3380 // Visit invalidated code (we ignored all slots on it) and clear mark-bits under it.
3382 ProcessInvalidatedCode(&updating_visitor);
3384 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3387 if (FLAG_verify_heap) {
3388 VerifyEvacuation(heap_);
3392 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3393 ASSERT(migration_slots_buffer_ == NULL);
3394 for (int i = 0; i < npages; i++) {
3395 Page* p = evacuation_candidates_[i];
3396 if (!p->IsEvacuationCandidate()) continue;
3397 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3398 space->Free(p->area_start(), p->area_size());
3399 p->set_scan_on_scavenge(false);
3400 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3401 p->ResetLiveBytes();
3402 space->ReleasePage(p);
3404 evacuation_candidates_.Rewind(0);
3405 compacting_ = false;
3409 static const int kStartTableEntriesPerLine = 5;
3410 static const int kStartTableLines = 171;
3411 static const int kStartTableInvalidLine = 127;
3412 static const int kStartTableUnusedEntry = 126;
3414 #define _ kStartTableUnusedEntry
3415 #define X kStartTableInvalidLine
3416 // Mark-bit to object start offset table.
3418 // The line is indexed by the mark bits in a byte. The first number on
3419 // the line describes the number of live object starts for the line and the
3420 // other numbers on the line describe the offsets (in words) of the object starts.
3423 // Since objects are at least 2 words long we don't have entries for two
3424 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
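// Worked example (illustrative): the byte 0b00010001 indexes line 17 below,
// which reads "2, 0, 4": two live objects start in these 8 words, at word
// offsets 0 and 4. Lines marked X can never be hit, because they would imply
// two consecutive mark bits, i.e. an object smaller than two words.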
3425 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3436 2, 1, 3, _, _, // 10
3437 X, _, _, _, _, // 11
3438 X, _, _, _, _, // 12
3439 X, _, _, _, _, // 13
3440 X, _, _, _, _, // 14
3441 X, _, _, _, _, // 15
3442 1, 4, _, _, _, // 16
3443 2, 0, 4, _, _, // 17
3444 2, 1, 4, _, _, // 18
3445 X, _, _, _, _, // 19
3446 2, 2, 4, _, _, // 20
3447 3, 0, 2, 4, _, // 21
3448 X, _, _, _, _, // 22
3449 X, _, _, _, _, // 23
3450 X, _, _, _, _, // 24
3451 X, _, _, _, _, // 25
3452 X, _, _, _, _, // 26
3453 X, _, _, _, _, // 27
3454 X, _, _, _, _, // 28
3455 X, _, _, _, _, // 29
3456 X, _, _, _, _, // 30
3457 X, _, _, _, _, // 31
3458 1, 5, _, _, _, // 32
3459 2, 0, 5, _, _, // 33
3460 2, 1, 5, _, _, // 34
3461 X, _, _, _, _, // 35
3462 2, 2, 5, _, _, // 36
3463 3, 0, 2, 5, _, // 37
3464 X, _, _, _, _, // 38
3465 X, _, _, _, _, // 39
3466 2, 3, 5, _, _, // 40
3467 3, 0, 3, 5, _, // 41
3468 3, 1, 3, 5, _, // 42
3469 X, _, _, _, _, // 43
3470 X, _, _, _, _, // 44
3471 X, _, _, _, _, // 45
3472 X, _, _, _, _, // 46
3473 X, _, _, _, _, // 47
3474 X, _, _, _, _, // 48
3475 X, _, _, _, _, // 49
3476 X, _, _, _, _, // 50
3477 X, _, _, _, _, // 51
3478 X, _, _, _, _, // 52
3479 X, _, _, _, _, // 53
3480 X, _, _, _, _, // 54
3481 X, _, _, _, _, // 55
3482 X, _, _, _, _, // 56
3483 X, _, _, _, _, // 57
3484 X, _, _, _, _, // 58
3485 X, _, _, _, _, // 59
3486 X, _, _, _, _, // 60
3487 X, _, _, _, _, // 61
3488 X, _, _, _, _, // 62
3489 X, _, _, _, _, // 63
3490 1, 6, _, _, _, // 64
3491 2, 0, 6, _, _, // 65
3492 2, 1, 6, _, _, // 66
3493 X, _, _, _, _, // 67
3494 2, 2, 6, _, _, // 68
3495 3, 0, 2, 6, _, // 69
3496 X, _, _, _, _, // 70
3497 X, _, _, _, _, // 71
3498 2, 3, 6, _, _, // 72
3499 3, 0, 3, 6, _, // 73
3500 3, 1, 3, 6, _, // 74
3501 X, _, _, _, _, // 75
3502 X, _, _, _, _, // 76
3503 X, _, _, _, _, // 77
3504 X, _, _, _, _, // 78
3505 X, _, _, _, _, // 79
3506 2, 4, 6, _, _, // 80
3507 3, 0, 4, 6, _, // 81
3508 3, 1, 4, 6, _, // 82
3509 X, _, _, _, _, // 83
3510 3, 2, 4, 6, _, // 84
3511 4, 0, 2, 4, 6, // 85
3512 X, _, _, _, _, // 86
3513 X, _, _, _, _, // 87
3514 X, _, _, _, _, // 88
3515 X, _, _, _, _, // 89
3516 X, _, _, _, _, // 90
3517 X, _, _, _, _, // 91
3518 X, _, _, _, _, // 92
3519 X, _, _, _, _, // 93
3520 X, _, _, _, _, // 94
3521 X, _, _, _, _, // 95
3522 X, _, _, _, _, // 96
3523 X, _, _, _, _, // 97
3524 X, _, _, _, _, // 98
3525 X, _, _, _, _, // 99
3526 X, _, _, _, _, // 100
3527 X, _, _, _, _, // 101
3528 X, _, _, _, _, // 102
3529 X, _, _, _, _, // 103
3530 X, _, _, _, _, // 104
3531 X, _, _, _, _, // 105
3532 X, _, _, _, _, // 106
3533 X, _, _, _, _, // 107
3534 X, _, _, _, _, // 108
3535 X, _, _, _, _, // 109
3536 X, _, _, _, _, // 110
3537 X, _, _, _, _, // 111
3538 X, _, _, _, _, // 112
3539 X, _, _, _, _, // 113
3540 X, _, _, _, _, // 114
3541 X, _, _, _, _, // 115
3542 X, _, _, _, _, // 116
3543 X, _, _, _, _, // 117
3544 X, _, _, _, _, // 118
3545 X, _, _, _, _, // 119
3546 X, _, _, _, _, // 120
3547 X, _, _, _, _, // 121
3548 X, _, _, _, _, // 122
3549 X, _, _, _, _, // 123
3550 X, _, _, _, _, // 124
3551 X, _, _, _, _, // 125
3552 X, _, _, _, _, // 126
3553 X, _, _, _, _, // 127
3554 1, 7, _, _, _, // 128
3555 2, 0, 7, _, _, // 129
3556 2, 1, 7, _, _, // 130
3557 X, _, _, _, _, // 131
3558 2, 2, 7, _, _, // 132
3559 3, 0, 2, 7, _, // 133
3560 X, _, _, _, _, // 134
3561 X, _, _, _, _, // 135
3562 2, 3, 7, _, _, // 136
3563 3, 0, 3, 7, _, // 137
3564 3, 1, 3, 7, _, // 138
3565 X, _, _, _, _, // 139
3566 X, _, _, _, _, // 140
3567 X, _, _, _, _, // 141
3568 X, _, _, _, _, // 142
3569 X, _, _, _, _, // 143
3570 2, 4, 7, _, _, // 144
3571 3, 0, 4, 7, _, // 145
3572 3, 1, 4, 7, _, // 146
3573 X, _, _, _, _, // 147
3574 3, 2, 4, 7, _, // 148
3575 4, 0, 2, 4, 7, // 149
3576 X, _, _, _, _, // 150
3577 X, _, _, _, _, // 151
3578 X, _, _, _, _, // 152
3579 X, _, _, _, _, // 153
3580 X, _, _, _, _, // 154
3581 X, _, _, _, _, // 155
3582 X, _, _, _, _, // 156
3583 X, _, _, _, _, // 157
3584 X, _, _, _, _, // 158
3585 X, _, _, _, _, // 159
3586 2, 5, 7, _, _, // 160
3587 3, 0, 5, 7, _, // 161
3588 3, 1, 5, 7, _, // 162
3589 X, _, _, _, _, // 163
3590 3, 2, 5, 7, _, // 164
3591 4, 0, 2, 5, 7, // 165
3592 X, _, _, _, _, // 166
3593 X, _, _, _, _, // 167
3594 3, 3, 5, 7, _, // 168
3595 4, 0, 3, 5, 7, // 169
3596 4, 1, 3, 5, 7 // 170
3602 // Takes a word of mark bits. Returns the number of objects that start in the
3603 // range. Puts the word offsets of the object starts in the supplied array.
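// Example (illustrative, assuming the offset accumulator starts at zero for
// the low byte): for mark_bits = 0b10101 (decimal 21) the table line
// "3, 0, 2, 4" applies, so the function writes {0, 2, 4} into |starts| and
// returns 3.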
3604 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3608 // No consecutive 1 bits.
3609 ASSERT((mark_bits & 0x180) != 0x180);
3610 ASSERT((mark_bits & 0x18000) != 0x18000);
3611 ASSERT((mark_bits & 0x1800000) != 0x1800000);
3613 while (mark_bits != 0) {
3614 int byte = (mark_bits & 0xff);
3617 ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3618 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3619 int objects_in_these_8_words = table[0];
3620 ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3621 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3622 for (int i = 0; i < objects_in_these_8_words; i++) {
3623 starts[objects++] = offset + table[1 + i];
3632 static inline Address DigestFreeStart(Address approximate_free_start,
3633 uint32_t free_start_cell) {
3634 ASSERT(free_start_cell != 0);
3636 // No consecutive 1 bits.
3637 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3640 uint32_t cell = free_start_cell;
3641 int offset_of_last_live;
3642 if ((cell & 0x80000000u) != 0) {
3643 // This case would overflow below.
3644 offset_of_last_live = 31;
3646 // Remove all but one bit, the most significant. This is an optimization
3647 // that may or may not be worthwhile.
3653 cell = (cell + 1) >> 1;
3654 int live_objects = MarkWordToObjectStarts(cell, offsets);
3655 ASSERT(live_objects == 1);
3656 offset_of_last_live = offsets[live_objects - 1];
3658 Address last_live_start =
3659 approximate_free_start + offset_of_last_live * kPointerSize;
3660 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3661 Address free_start = last_live_start + last_live->Size();
3666 static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
3669 // No consecutive 1 bits.
3670 ASSERT((cell & (cell << 1)) == 0);
3673 if (cell == 0x80000000u) { // Avoid overflow below.
3674 return block_address + 31 * kPointerSize;
3676 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
3677 ASSERT((first_set_bit & cell) == first_set_bit);
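// Illustrative check of the bit trick above: it isolates the lowest set bit
// (equivalent to cell & -cell). E.g. cell = 0b01100: cell - 1 = 0b01011,
// cell ^ (cell - 1) = 0b00111, + 1 = 0b01000, >> 1 = 0b00100, which is the
// lowest set bit and therefore the first live object's mark bit.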
3678 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
3679 ASSERT(live_objects == 1);
3681 return block_address + offsets[0] * kPointerSize;
3685 // Sweeps a space conservatively. After this has been done the larger free
3686 // spaces have been put on the free list and the smaller ones have been
3687 // ignored and left untouched. A free space is always either ignored or put
3688 // on the free list, never split up into two parts. This is important
3689 // because it means that any FreeSpace maps left actually describe a region of
3690 // memory that can be ignored when scanning. Dead objects other than free
3691 // spaces will not contain the free space map.
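// In the loop below a free gap is only handed to the free list once it is
// known (at least approximately) to span more than 32 words; smaller gaps
// are cheaper to leave as unswept garbage than to link into the free list.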
3692 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
3693 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3694 MarkBit::CellType* cells = p->markbits()->cells();
3695 p->MarkSweptConservatively();
3697 int last_cell_index =
3698 Bitmap::IndexToCell(
3699 Bitmap::CellAlignIndex(
3700 p->AddressToMarkbitIndex(p->area_end())));
3703 Bitmap::IndexToCell(
3704 Bitmap::CellAlignIndex(
3705 p->AddressToMarkbitIndex(p->area_start())));
3707 intptr_t freed_bytes = 0;
3709 // This is the start of the 32-word block that we are currently looking at.
3710 Address block_address = p->area_start();
3712 // Skip over all the dead objects at the start of the page and mark them free.
3714 cell_index < last_cell_index;
3715 cell_index++, block_address += 32 * kPointerSize) {
3716 if (cells[cell_index] != 0) break;
3718 size_t size = block_address - p->area_start();
3719 if (cell_index == last_cell_index) {
3720 freed_bytes += static_cast<int>(space->Free(p->area_start(),
3721 static_cast<int>(size)));
3722 ASSERT_EQ(0, p->LiveBytes());
3725 // Grow the size of the start-of-page free space a little to get up to the
3726 // first live object.
3727 Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
3728 // Free the first free space.
3729 size = free_end - p->area_start();
3730 freed_bytes += space->Free(p->area_start(),
3731 static_cast<int>(size));
3732 // The start of the current free area is represented in undigested form by
3733 // the address of the last 32-word section that contained a live object and
3734 // the marking bitmap for that cell, which describes where the live object
3735 // started. Unless we find a large free space in the bitmap we will not
3736 // digest this pair into a real address. We start the iteration here at the
3737 // first word in the marking bit map that indicates a live object.
3738 Address free_start = block_address;
3739 uint32_t free_start_cell = cells[cell_index];
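// For example, if the last live object seen so far starts at word 30 of its
// block, free_start points at that block and free_start_cell has bit 30
// set; only if a large enough gap turns up later does DigestFreeStart
// convert the pair into the exact free-space start, i.e. the word-30
// address plus that object's size.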
3742 cell_index < last_cell_index;
3743 cell_index++, block_address += 32 * kPointerSize) {
3744 ASSERT((unsigned)cell_index ==
3745 Bitmap::IndexToCell(
3746 Bitmap::CellAlignIndex(
3747 p->AddressToMarkbitIndex(block_address))));
3748 uint32_t cell = cells[cell_index];
3750 // We have a live object. Check approximately whether the gap since the
3751 // last live object is more than 32 words.
3752 if (block_address - free_start > 32 * kPointerSize) {
3753 free_start = DigestFreeStart(free_start, free_start_cell);
3754 if (block_address - free_start > 32 * kPointerSize) {
3755 // Now that we know the exact start of the free space it still looks
3756 // like we have a large enough free space to be worth bothering with,
3757 // so now we need to find the start of the first live object at the
3758 // end of the free space.
3759 free_end = StartOfLiveObject(block_address, cell);
3760 freed_bytes += space->Free(free_start,
3761 static_cast<int>(free_end - free_start));
3764 // Update our undigested record of where the current free area started.
3765 free_start = block_address;
3766 free_start_cell = cell;
3767 // Clear marking bits for current cell.
3768 cells[cell_index] = 0;
3772 // Handle the free space at the end of the page.
3773 if (block_address - free_start > 32 * kPointerSize) {
3774 free_start = DigestFreeStart(free_start, free_start_cell);
3775 freed_bytes += space->Free(free_start,
3776 static_cast<int>(block_address - free_start));
3779 p->ResetLiveBytes();
3784 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
3785 space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
3786 sweeper == LAZY_CONSERVATIVE);
3788 space->ClearStats();
3790 PageIterator it(space);
3792 intptr_t freed_bytes = 0;
3793 int pages_swept = 0;
3794 intptr_t newspace_size = space->heap()->new_space()->Size();
3795 bool lazy_sweeping_active = false;
3796 bool unused_page_present = false;
3798 intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
3799 intptr_t space_left =
3800 Min(heap()->OldGenPromotionLimit(old_space_size),
3801 heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
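// space_left is a rough measure of how much old-generation headroom remains
// before the promotion/allocation limits are reached. The lazy variant
// below keeps sweeping pages eagerly until space_left plus the bytes freed
// so far could absorb a full new space worth of promotions, and leaves the
// remaining pages to the lazy sweeper.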
3803 while (it.has_next()) {
3804 Page* p = it.next();
3806 // Clear the sweeping flags, indicating that the marking bits are still intact.
3807 p->ClearSweptPrecisely();
3808 p->ClearSweptConservatively();
3810 if (p->IsEvacuationCandidate()) {
3811 ASSERT(evacuation_candidates_.length() > 0);
3815 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3816 // Will be processed in EvacuateNewSpaceAndCandidates.
3820 // One unused page is kept; all further ones are released before sweeping them.
3821 if (p->LiveBytes() == 0) {
3822 if (unused_page_present) {
3823 if (FLAG_gc_verbose) {
3824 PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
3825 reinterpret_cast<intptr_t>(p));
3827 // Adjust unswept free bytes because releasing a page expects said
3828 // counter to be accurate for unswept pages.
3829 space->IncreaseUnsweptFreeBytes(p);
3830 space->ReleasePage(p);
3833 unused_page_present = true;
3836 if (lazy_sweeping_active) {
3837 if (FLAG_gc_verbose) {
3838 PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
3839 reinterpret_cast<intptr_t>(p));
3841 space->IncreaseUnsweptFreeBytes(p);
3846 case CONSERVATIVE: {
3847 if (FLAG_gc_verbose) {
3848 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
3849 reinterpret_cast<intptr_t>(p));
3851 SweepConservatively(space, p);
3855 case LAZY_CONSERVATIVE: {
3856 if (FLAG_gc_verbose) {
3857 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
3858 reinterpret_cast<intptr_t>(p));
3860 freed_bytes += SweepConservatively(space, p);
3862 if (space_left + freed_bytes > newspace_size) {
3863 space->SetPagesToSweep(p->next_page());
3864 lazy_sweeping_active = true;
3866 if (FLAG_gc_verbose) {
3867 PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
3874 if (FLAG_gc_verbose) {
3875 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
3876 reinterpret_cast<intptr_t>(p));
3878 if (space->identity() == CODE_SPACE) {
3879 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
3881 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
3892 if (FLAG_gc_verbose) {
3893 PrintF("SweepSpace: %s (%d pages swept)\n",
3894 AllocationSpaceName(space->identity()),
3898 // Give pages that are queued to be freed back to the OS.
3899 heap()->FreeQueuedChunks();
3903 void MarkCompactCollector::SweepSpaces() {
3904 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
3906 state_ = SWEEP_SPACES;
3908 SweeperType how_to_sweep =
3909 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
3910 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
3911 if (sweep_precisely_) how_to_sweep = PRECISE;
3912 // Noncompacting collections simply sweep the spaces to clear the mark
3913 // bits and free the nonlive blocks (for old and map spaces). We sweep
3914 // the map space last because freeing non-live maps overwrites them and
3915 // the other spaces rely on possibly non-live maps to get the sizes for
3916 // non-live objects.
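// For example, a dead object left untouched by conservative sweeping is
// still sized via its map when the space is scanned later, and that map may
// itself be unmarked; sweeping map space first would overwrite it.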
3917 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
3918 SweepSpace(heap()->old_data_space(), how_to_sweep);
3920 RemoveDeadInvalidatedCode();
3921 SweepSpace(heap()->code_space(), PRECISE);
3923 SweepSpace(heap()->cell_space(), PRECISE);
3925 EvacuateNewSpaceAndCandidates();
3927 // ClearNonLiveTransitions depends on precise sweeping of map space to
3928 // detect whether an unmarked map became dead in this collection or in one
3929 // of the previous ones.
3930 SweepSpace(heap()->map_space(), PRECISE);
3932 // Deallocate unmarked objects and clear mark bits for marked objects.
3933 heap_->lo_space()->FreeUnmarkedObjects();
3937 void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3939 if (code_flusher_ != NULL) return;
3940 code_flusher_ = new CodeFlusher(heap()->isolate());
3942 if (code_flusher_ == NULL) return;
3943 delete code_flusher_;
3944 code_flusher_ = NULL;
3949 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3950 // Our profiling tools do not expect intersections between
3951 // code objects. We should either reenable it or change our tools.
3952 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3954 #ifdef ENABLE_GDB_JIT_INTERFACE
3955 if (obj->IsCode()) {
3956 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3959 if (obj->IsCode()) {
3960 PROFILE(isolate, CodeDeleteEvent(obj->address()));
3965 void MarkCompactCollector::Initialize() {
3966 StaticMarkingVisitor::Initialize();
3970 bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
3971 return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
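// A typed slot occupies two consecutive buffer entries: first the SlotType
// cast to an ObjectSlot (a small integer, well below any real slot address,
// which is what the comparison above relies on) and then the address itself;
// see AddTo() below and the matching two-entry decode in UpdateSlots().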
3975 bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
3976 SlotsBuffer** buffer_address,
3979 AdditionMode mode) {
3980 SlotsBuffer* buffer = *buffer_address;
3981 if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
3982 if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
3983 allocator->DeallocateChain(buffer_address);
3986 buffer = allocator->AllocateBuffer(buffer);
3987 *buffer_address = buffer;
3989 ASSERT(buffer->HasSpaceForTypedSlot());
3990 buffer->Add(reinterpret_cast<ObjectSlot>(type));
3991 buffer->Add(reinterpret_cast<ObjectSlot>(addr));
3996 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
3997 if (RelocInfo::IsCodeTarget(rmode)) {
3998 return SlotsBuffer::CODE_TARGET_SLOT;
3999 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
4000 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
4001 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
4002 return SlotsBuffer::DEBUG_TARGET_SLOT;
4003 } else if (RelocInfo::IsJSReturn(rmode)) {
4004 return SlotsBuffer::JS_RETURN_SLOT;
4007 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
4011 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
4012 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4013 if (target_page->IsEvacuationCandidate() &&
4014 (rinfo->host() == NULL ||
4015 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
4016 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4017 target_page->slots_buffer_address(),
4018 SlotTypeForRMode(rinfo->rmode()),
4020 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4021 EvictEvacuationCandidate(target_page);
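// If the page's slots buffer chain has already reached its length threshold,
// AddTo() fails under FAIL_ON_OVERFLOW (deallocating the chain), and the
// page is simply dropped as an evacuation candidate instead of accumulating
// further slots.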
4027 void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
4028 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4029 if (target_page->IsEvacuationCandidate() &&
4030 !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
4031 if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
4032 target_page->slots_buffer_address(),
4033 SlotsBuffer::CODE_ENTRY_SLOT,
4035 SlotsBuffer::FAIL_ON_OVERFLOW)) {
4036 EvictEvacuationCandidate(target_page);
4042 static inline SlotsBuffer::SlotType DecodeSlotType(
4043 SlotsBuffer::ObjectSlot slot) {
4044 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
4048 void SlotsBuffer::UpdateSlots(Heap* heap) {
4049 PointersUpdatingVisitor v(heap);
4051 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4052 ObjectSlot slot = slots_[slot_idx];
4053 if (!IsTypedSlot(slot)) {
4054 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4057 ASSERT(slot_idx < idx_);
4059 DecodeSlotType(slot),
4060 reinterpret_cast<Address>(slots_[slot_idx]));
4066 void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
4067 PointersUpdatingVisitor v(heap);
4069 for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
4070 ObjectSlot slot = slots_[slot_idx];
4071 if (!IsTypedSlot(slot)) {
4072 if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
4073 PointersUpdatingVisitor::UpdateSlot(heap, slot);
4077 ASSERT(slot_idx < idx_);
4078 Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
4079 if (!IsOnInvalidatedCodeObject(pc)) {
4081 DecodeSlotType(slot),
4082 reinterpret_cast<Address>(slots_[slot_idx]));
4089 SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
4090 return new SlotsBuffer(next_buffer);
4094 void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
4099 void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
4100 SlotsBuffer* buffer = *buffer_address;
4101 while (buffer != NULL) {
4102 SlotsBuffer* next_buffer = buffer->next();
4103 DeallocateBuffer(buffer);
4104 buffer = next_buffer;
4106 *buffer_address = NULL;
4110 } } // namespace v8::internal