1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #include "code-stubs.h"
31 #include "compilation-cache.h"
32 #include "deoptimizer.h"
33 #include "execution.h"
35 #include "global-handles.h"
36 #include "heap-profiler.h"
38 #include "incremental-marking.h"
39 #include "liveobjectlist-inl.h"
40 #include "mark-compact.h"
41 #include "objects-visiting.h"
42 #include "objects-visiting-inl.h"
43 #include "stub-cache.h"
49 const char* Marking::kWhiteBitPattern = "00";
50 const char* Marking::kBlackBitPattern = "10";
51 const char* Marking::kGreyBitPattern = "11";
52 const char* Marking::kImpossibleBitPattern = "01";
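// For orientation: each heap object has two consecutive mark bits in its
// page's marking bitmap, read as (first bit, second bit). Roughly:
//   "00" white - not yet reached by the marker.
//   "10" black - reached, and its children have already been scanned.
//   "11" grey  - reached, but still waiting on the marking deque.
//   "01"       - a combination the marker never produces, hence "impossible".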
55 // -------------------------------------------------------------------------
56 // MarkCompactCollector
58 MarkCompactCollector::MarkCompactCollector() : // NOLINT
62 sweep_precisely_(false),
63 reduce_memory_footprint_(false),
64 abort_incremental_marking_(false),
66 was_marked_incrementally_(false),
67 collect_maps_(FLAG_collect_maps),
68 flush_monomorphic_ics_(false),
70 migration_slots_buffer_(NULL),
73 encountered_weak_maps_(NULL) { }
77 class VerifyMarkingVisitor: public ObjectVisitor {
79 void VisitPointers(Object** start, Object** end) {
80 for (Object** current = start; current < end; current++) {
81 if ((*current)->IsHeapObject()) {
82 HeapObject* object = HeapObject::cast(*current);
83 ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
90 static void VerifyMarking(Address bottom, Address top) {
91 VerifyMarkingVisitor visitor;
93 Address next_object_must_be_here_or_later = bottom;
95 for (Address current = bottom;
97 current += kPointerSize) {
98 object = HeapObject::FromAddress(current);
99 if (MarkCompactCollector::IsMarked(object)) {
100 ASSERT(current >= next_object_must_be_here_or_later);
101 object->Iterate(&visitor);
102 next_object_must_be_here_or_later = current + object->Size();
108 static void VerifyMarking(NewSpace* space) {
109 Address end = space->top();
110 NewSpacePageIterator it(space->bottom(), end);
111 // The bottom position is at the start of its page. This allows us to use
112 // page->area_start() as the start of the range on all pages.
113 ASSERT_EQ(space->bottom(),
114 NewSpacePage::FromAddress(space->bottom())->area_start());
115 while (it.has_next()) {
116 NewSpacePage* page = it.next();
117 Address limit = it.has_next() ? page->area_end() : end;
118 ASSERT(limit == end || !page->Contains(end));
119 VerifyMarking(page->area_start(), limit);
124 static void VerifyMarking(PagedSpace* space) {
125 PageIterator it(space);
127 while (it.has_next()) {
129 VerifyMarking(p->area_start(), p->area_end());
134 static void VerifyMarking(Heap* heap) {
135 VerifyMarking(heap->old_pointer_space());
136 VerifyMarking(heap->old_data_space());
137 VerifyMarking(heap->code_space());
138 VerifyMarking(heap->cell_space());
139 VerifyMarking(heap->map_space());
140 VerifyMarking(heap->new_space());
142 VerifyMarkingVisitor visitor;
144 LargeObjectIterator it(heap->lo_space());
145 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
146 if (MarkCompactCollector::IsMarked(obj)) {
147 obj->Iterate(&visitor);
151 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
155 class VerifyEvacuationVisitor: public ObjectVisitor {
157 void VisitPointers(Object** start, Object** end) {
158 for (Object** current = start; current < end; current++) {
159 if ((*current)->IsHeapObject()) {
160 HeapObject* object = HeapObject::cast(*current);
161 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
168 static void VerifyEvacuation(Address bottom, Address top) {
169 VerifyEvacuationVisitor visitor;
171 Address next_object_must_be_here_or_later = bottom;
173 for (Address current = bottom;
175 current += kPointerSize) {
176 object = HeapObject::FromAddress(current);
177 if (MarkCompactCollector::IsMarked(object)) {
178 ASSERT(current >= next_object_must_be_here_or_later);
179 object->Iterate(&visitor);
180 next_object_must_be_here_or_later = current + object->Size();
186 static void VerifyEvacuation(NewSpace* space) {
187 NewSpacePageIterator it(space->bottom(), space->top());
188 VerifyEvacuationVisitor visitor;
190 while (it.has_next()) {
191 NewSpacePage* page = it.next();
192 Address current = page->area_start();
193 Address limit = it.has_next() ? page->area_end() : space->top();
194 ASSERT(limit == space->top() || !page->Contains(space->top()));
195 while (current < limit) {
196 HeapObject* object = HeapObject::FromAddress(current);
197 object->Iterate(&visitor);
198 current += object->Size();
204 static void VerifyEvacuation(PagedSpace* space) {
205 PageIterator it(space);
207 while (it.has_next()) {
209 if (p->IsEvacuationCandidate()) continue;
210 VerifyEvacuation(p->area_start(), p->area_end());
215 static void VerifyEvacuation(Heap* heap) {
216 VerifyEvacuation(heap->old_pointer_space());
217 VerifyEvacuation(heap->old_data_space());
218 VerifyEvacuation(heap->code_space());
219 VerifyEvacuation(heap->cell_space());
220 VerifyEvacuation(heap->map_space());
221 VerifyEvacuation(heap->new_space());
223 VerifyEvacuationVisitor visitor;
224 heap->IterateStrongRoots(&visitor, VISIT_ALL);
229 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
230 p->MarkEvacuationCandidate();
231 evacuation_candidates_.Add(p);
235 static void TraceFragmentation(PagedSpace* space) {
236 int number_of_pages = space->CountTotalPages();
237 intptr_t reserved = (number_of_pages * space->AreaSize());
238 intptr_t free = reserved - space->SizeOfObjects();
239 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
240 AllocationSpaceName(space->identity()),
242 static_cast<int>(free),
243 static_cast<double>(free) * 100 / reserved);
247 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
249 ASSERT(evacuation_candidates_.length() == 0);
251 CollectEvacuationCandidates(heap()->old_pointer_space());
252 CollectEvacuationCandidates(heap()->old_data_space());
254 if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
255 CollectEvacuationCandidates(heap()->code_space());
256 } else if (FLAG_trace_fragmentation) {
257 TraceFragmentation(heap()->code_space());
260 if (FLAG_trace_fragmentation) {
261 TraceFragmentation(heap()->map_space());
262 TraceFragmentation(heap()->cell_space());
265 heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
266 heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
267 heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
269 compacting_ = evacuation_candidates_.length() > 0;
276 void MarkCompactCollector::CollectGarbage() {
277 // Make sure that Prepare() has been called. The individual steps below will
278 // update the state as they proceed.
279 ASSERT(state_ == PREPARE_GC);
280 ASSERT(encountered_weak_maps_ == Smi::FromInt(0));
283 ASSERT(heap_->incremental_marking()->IsStopped());
285 if (collect_maps_) ClearNonLiveTransitions();
290 if (FLAG_verify_heap) {
291 VerifyMarking(heap_);
297 if (!collect_maps_) ReattachInitialMaps();
299 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
308 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
309 PageIterator it(space);
311 while (it.has_next()) {
313 CHECK(p->markbits()->IsClean());
314 CHECK_EQ(0, p->LiveBytes());
318 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
319 NewSpacePageIterator it(space->bottom(), space->top());
321 while (it.has_next()) {
322 NewSpacePage* p = it.next();
323 CHECK(p->markbits()->IsClean());
324 CHECK_EQ(0, p->LiveBytes());
328 void MarkCompactCollector::VerifyMarkbitsAreClean() {
329 VerifyMarkbitsAreClean(heap_->old_pointer_space());
330 VerifyMarkbitsAreClean(heap_->old_data_space());
331 VerifyMarkbitsAreClean(heap_->code_space());
332 VerifyMarkbitsAreClean(heap_->cell_space());
333 VerifyMarkbitsAreClean(heap_->map_space());
334 VerifyMarkbitsAreClean(heap_->new_space());
336 LargeObjectIterator it(heap_->lo_space());
337 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
338 MarkBit mark_bit = Marking::MarkBitFrom(obj);
339 ASSERT(Marking::IsWhite(mark_bit));
345 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
346 PageIterator it(space);
348 while (it.has_next()) {
349 Bitmap::Clear(it.next());
354 static void ClearMarkbitsInNewSpace(NewSpace* space) {
355 NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
357 while (it.has_next()) {
358 Bitmap::Clear(it.next());
363 void MarkCompactCollector::ClearMarkbits() {
364 ClearMarkbitsInPagedSpace(heap_->code_space());
365 ClearMarkbitsInPagedSpace(heap_->map_space());
366 ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
367 ClearMarkbitsInPagedSpace(heap_->old_data_space());
368 ClearMarkbitsInPagedSpace(heap_->cell_space());
369 ClearMarkbitsInNewSpace(heap_->new_space());
371 LargeObjectIterator it(heap_->lo_space());
372 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
373 MarkBit mark_bit = Marking::MarkBitFrom(obj);
375 mark_bit.Next().Clear();
380 bool Marking::TransferMark(Address old_start, Address new_start) {
381 // This is only used when resizing an object.
382 ASSERT(MemoryChunk::FromAddress(old_start) ==
383 MemoryChunk::FromAddress(new_start));
385 // If the mark doesn't move, we don't check the color of the object.
386 // It doesn't matter whether the object is black, since it hasn't changed
387 // size, so the adjustment to the live data count will be zero anyway.
388 if (old_start == new_start) return false;
390 MarkBit new_mark_bit = MarkBitFrom(new_start);
391 MarkBit old_mark_bit = MarkBitFrom(old_start);
394 ObjectColor old_color = Color(old_mark_bit);
397 if (Marking::IsBlack(old_mark_bit)) {
398 old_mark_bit.Clear();
399 ASSERT(IsWhite(old_mark_bit));
400 Marking::MarkBlack(new_mark_bit);
402 } else if (Marking::IsGrey(old_mark_bit)) {
403 ASSERT(heap_->incremental_marking()->IsMarking());
404 old_mark_bit.Clear();
405 old_mark_bit.Next().Clear();
406 ASSERT(IsWhite(old_mark_bit));
407 heap_->incremental_marking()->WhiteToGreyAndPush(
408 HeapObject::FromAddress(new_start), new_mark_bit);
409 heap_->incremental_marking()->RestartIfNotMarking();
413 ObjectColor new_color = Color(new_mark_bit);
414 ASSERT(new_color == old_color);
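// For illustration: when a black object is resized in place, the branch above
// clears the old mark bits and re-marks the new start address black; a grey
// object is instead re-pushed via WhiteToGreyAndPush so the incremental marker
// still visits it at its new address.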
421 const char* AllocationSpaceName(AllocationSpace space) {
423 case NEW_SPACE: return "NEW_SPACE";
424 case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
425 case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
426 case CODE_SPACE: return "CODE_SPACE";
427 case MAP_SPACE: return "MAP_SPACE";
428 case CELL_SPACE: return "CELL_SPACE";
429 case LO_SPACE: return "LO_SPACE";
438 // Returns zero for pages that have so little fragmentation that it is not
439 // worth defragmenting them. Otherwise it returns a positive integer that
440 // gives an estimate of fragmentation on an arbitrary scale.
441 static int FreeListFragmentation(PagedSpace* space, Page* p) {
442 // If page was not swept then there are no free list items on it.
443 if (!p->WasSwept()) {
444 if (FLAG_trace_fragmentation) {
445 PrintF("%p [%s]: %d bytes live (unswept)\n",
446 reinterpret_cast<void*>(p),
447 AllocationSpaceName(space->identity()),
453 FreeList::SizeStats sizes;
454 space->CountFreeListItems(p, &sizes);
457 intptr_t ratio_threshold;
458 intptr_t area_size = space->AreaSize();
459 if (space->identity() == CODE_SPACE) {
460 ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
462 ratio_threshold = 10;
464 ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
466 ratio_threshold = 15;
469 if (FLAG_trace_fragmentation) {
470 PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
471 reinterpret_cast<void*>(p),
472 AllocationSpaceName(space->identity()),
473 static_cast<int>(sizes.small_size_),
474 static_cast<double>(sizes.small_size_ * 100) /
476 static_cast<int>(sizes.medium_size_),
477 static_cast<double>(sizes.medium_size_ * 100) /
479 static_cast<int>(sizes.large_size_),
480 static_cast<double>(sizes.large_size_ * 100) /
482 static_cast<int>(sizes.huge_size_),
483 static_cast<double>(sizes.huge_size_ * 100) /
485 (ratio > ratio_threshold) ? "[fragmented]" : "");
488 if (FLAG_always_compact && sizes.Total() != area_size) {
492 if (ratio <= ratio_threshold) return 0; // Not fragmented.
494 return static_cast<int>(ratio - ratio_threshold);
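// Worked example (illustrative numbers, assuming an area size of roughly
// 1 MB): in a non-code space with small_size_ = 64 KB and medium_size_ =
// 128 KB, ratio = (64K * 5 + 128K) * 100 / 1M is about 43, which is above the
// threshold of 15, so the page reports a fragmentation estimate of roughly 28.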
498 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
499 ASSERT(space->identity() == OLD_POINTER_SPACE ||
500 space->identity() == OLD_DATA_SPACE ||
501 space->identity() == CODE_SPACE);
503 int number_of_pages = space->CountTotalPages();
505 const int kMaxMaxEvacuationCandidates = 1000;
506 int max_evacuation_candidates = Min(
507 kMaxMaxEvacuationCandidates,
508 static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
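// For example, with 512 pages this evaluates to Min(1000, sqrt(256) + 1) = 17.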
510 if (FLAG_stress_compaction || FLAG_always_compact) {
511 max_evacuation_candidates = kMaxMaxEvacuationCandidates;
516 Candidate() : fragmentation_(0), page_(NULL) { }
517 Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
519 int fragmentation() { return fragmentation_; }
520 Page* page() { return page_; }
527 enum CompactionMode {
529 REDUCE_MEMORY_FOOTPRINT
532 CompactionMode mode = COMPACT_FREE_LISTS;
534 intptr_t reserved = number_of_pages * space->AreaSize();
535 intptr_t over_reserved = reserved - space->SizeOfObjects();
536 static const intptr_t kFreenessThreshold = 50;
538 if (over_reserved >= 2 * space->AreaSize() &&
539 reduce_memory_footprint_) {
540 mode = REDUCE_MEMORY_FOOTPRINT;
542 // We expect that empty pages are easier to compact, so slightly bump the
543 // candidate limit.
544 max_evacuation_candidates += 2;
546 if (FLAG_trace_fragmentation) {
547 PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
548 static_cast<double>(over_reserved) / MB,
549 static_cast<int>(kFreenessThreshold));
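// For illustration (assuming an area size of roughly 1 MB): this mode only
// kicks in when at least about 2 MB more is reserved than is live, and the
// loop below stops collecting candidates once roughly three quarters of that
// excess has been scheduled for release.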
553 intptr_t estimated_release = 0;
555 Candidate candidates[kMaxMaxEvacuationCandidates];
558 int fragmentation = 0;
559 Candidate* least = NULL;
561 PageIterator it(space);
562 if (it.has_next()) it.next(); // Never compact the first page.
564 while (it.has_next()) {
566 p->ClearEvacuationCandidate();
568 if (FLAG_stress_compaction) {
569 int counter = space->heap()->ms_count();
570 uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
571 if ((counter & 1) == (page_number & 1)) fragmentation = 1;
572 } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
573 // Don't try to release too many pages.
574 if (estimated_release >= ((over_reserved * 3) / 4)) {
578 intptr_t free_bytes = 0;
580 if (!p->WasSwept()) {
581 free_bytes = (p->area_size() - p->LiveBytes());
583 FreeList::SizeStats sizes;
584 space->CountFreeListItems(p, &sizes);
585 free_bytes = sizes.Total();
588 int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
590 if (free_pct >= kFreenessThreshold) {
591 estimated_release += 2 * p->area_size() - free_bytes;
592 fragmentation = free_pct;
597 if (FLAG_trace_fragmentation) {
598 PrintF("%p [%s]: %d (%.2f%%) free %s\n",
599 reinterpret_cast<void*>(p),
600 AllocationSpaceName(space->identity()),
601 static_cast<int>(free_bytes),
602 static_cast<double>(free_bytes * 100) / p->area_size(),
603 (fragmentation > 0) ? "[fragmented]" : "");
606 fragmentation = FreeListFragmentation(space, p);
609 if (fragmentation != 0) {
610 if (count < max_evacuation_candidates) {
611 candidates[count++] = Candidate(fragmentation, p);
614 for (int i = 0; i < max_evacuation_candidates; i++) {
616 candidates[i].fragmentation() < least->fragmentation()) {
617 least = candidates + i;
621 if (least->fragmentation() < fragmentation) {
622 *least = Candidate(fragmentation, p);
629 for (int i = 0; i < count; i++) {
630 AddEvacuationCandidate(candidates[i].page());
633 if (count > 0 && FLAG_trace_fragmentation) {
634 PrintF("Collected %d evacuation candidates for space %s\n",
636 AllocationSpaceName(space->identity()));
641 void MarkCompactCollector::AbortCompaction() {
643 int npages = evacuation_candidates_.length();
644 for (int i = 0; i < npages; i++) {
645 Page* p = evacuation_candidates_[i];
646 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
647 p->ClearEvacuationCandidate();
648 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
651 evacuation_candidates_.Rewind(0);
652 invalidated_code_.Rewind(0);
654 ASSERT_EQ(0, evacuation_candidates_.length());
658 void MarkCompactCollector::Prepare(GCTracer* tracer) {
659 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
661 // Disable collection of maps if incremental marking is enabled.
662 // The map collection algorithm relies on a special map transition tree
663 // traversal order which is not implemented for incremental marking.
664 collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
666 // Monomorphic ICs are preserved when possible, but need to be flushed
667 // when they might be keeping a Context alive, or when the heap is about
668 // to be serialized.
669 flush_monomorphic_ics_ =
670 heap()->isolate()->context_exit_happened() || Serializer::enabled();
672 // Rather than passing the tracer around, we stash it in a static member
673 // variable.
677 ASSERT(state_ == IDLE);
681 ASSERT(!FLAG_never_compact || !FLAG_always_compact);
683 if (collect_maps_) CreateBackPointers();
684 #ifdef ENABLE_GDB_JIT_INTERFACE
686 // If GDBJIT interface is active disable compaction.
687 compacting_collection_ = false;
691 // Clear marking bits if incremental marking is aborted.
692 if (was_marked_incrementally_ && abort_incremental_marking_) {
693 heap()->incremental_marking()->Abort();
696 was_marked_incrementally_ = false;
699 // Don't start compaction if we are in the middle of an incremental
700 // marking cycle: we did not collect any slots.
701 if (!FLAG_never_compact && !was_marked_incrementally_) {
702 StartCompaction(NON_INCREMENTAL_COMPACTION);
706 for (PagedSpace* space = spaces.next();
708 space = spaces.next()) {
709 space->PrepareForMarkCompact();
713 if (!was_marked_incrementally_ && FLAG_verify_heap) {
714 VerifyMarkbitsAreClean();
720 void MarkCompactCollector::Finish() {
722 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
725 // The stub cache is not traversed during GC; clear the cache to
726 // force lazy re-initialization of it. This must be done after the
727 // GC, because it relies on the new address of certain old space
728 // objects (empty string, illegal builtin).
729 heap()->isolate()->stub_cache()->Clear();
731 heap()->external_string_table_.CleanUp();
735 // -------------------------------------------------------------------------
736 // Phase 1: tracing and marking live objects.
737 // before: all objects are in normal state.
738 // after: live objects are marked in the marking bitmaps (black, "10").
740 // Marking all live objects in the heap as part of mark-sweep or mark-compact
741 // collection. Before marking, all objects are in their normal state. After
742 // marking, live objects are marked in the marking bitmaps, indicating that
743 // the object has been found reachable.
745 // The marking algorithm is a (mostly) depth-first (because of possible stack
746 // overflow) traversal of the graph of objects reachable from the roots. It
747 // uses an explicit stack of pointers rather than recursion. The young
748 // generation's inactive ('from') space is used as a marking stack. The
749 // objects in the marking stack are the ones that have been reached and marked
750 // but their children have not yet been visited.
752 // The marking stack can overflow during traversal. In that case, we set an
753 // overflow flag. When the overflow flag is set, we continue marking objects
754 // reachable from the objects on the marking stack, but no longer push them on
755 // the marking stack. Instead, we mark them as both marked and overflowed.
756 // When the stack is in the overflowed state, objects marked as overflowed
757 // have been reached and marked but their children have not been visited yet.
758 // After emptying the marking stack, we clear the overflow flag and traverse
759 // the heap looking for objects marked as overflowed, push them on the stack,
760 // and continue with marking. This process repeats until all reachable
761 // objects have been marked.
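// A rough sketch of the loop described above (illustrative pseudocode only;
// the helper names are placeholders, not the exact V8 routines):
//
//   do {
//     while (!marking_deque.IsEmpty()) {
//       HeapObject* obj = marking_deque.Pop();   // a grey object
//       VisitBodyAndGreyChildren(obj);           // may overflow the deque
//       // obj is now effectively black.
//     }
//     if (marking_deque.overflowed()) {
//       marking_deque.ClearOverflowed();
//       RefillMarkingDeque();  // linear heap scan for overflowed objects
//     }
//   } while (!marking_deque.IsEmpty());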
765 explicit CodeFlusher(Isolate* isolate)
767 jsfunction_candidates_head_(NULL),
768 shared_function_info_candidates_head_(NULL) {}
770 void AddCandidate(SharedFunctionInfo* shared_info) {
771 SetNextCandidate(shared_info, shared_function_info_candidates_head_);
772 shared_function_info_candidates_head_ = shared_info;
775 void AddCandidate(JSFunction* function) {
776 ASSERT(function->code() == function->shared()->code());
778 SetNextCandidate(function, jsfunction_candidates_head_);
779 jsfunction_candidates_head_ = function;
782 void ProcessCandidates() {
783 ProcessSharedFunctionInfoCandidates();
784 ProcessJSFunctionCandidates();
788 void ProcessJSFunctionCandidates() {
789 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
791 JSFunction* candidate = jsfunction_candidates_head_;
792 JSFunction* next_candidate;
793 while (candidate != NULL) {
794 next_candidate = GetNextCandidate(candidate);
796 SharedFunctionInfo* shared = candidate->shared();
798 Code* code = shared->code();
799 MarkBit code_mark = Marking::MarkBitFrom(code);
800 if (!code_mark.Get()) {
801 shared->set_code(lazy_compile);
802 candidate->set_code(lazy_compile);
804 candidate->set_code(shared->code());
807 // We are in the middle of a GC cycle so the write barrier in the code
808 // setter did not record the slot update and we have to do that manually.
809 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
810 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
811 isolate_->heap()->mark_compact_collector()->
812 RecordCodeEntrySlot(slot, target);
814 RecordSharedFunctionInfoCodeSlot(shared);
816 candidate = next_candidate;
819 jsfunction_candidates_head_ = NULL;
823 void ProcessSharedFunctionInfoCandidates() {
824 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
826 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
827 SharedFunctionInfo* next_candidate;
828 while (candidate != NULL) {
829 next_candidate = GetNextCandidate(candidate);
830 SetNextCandidate(candidate, NULL);
832 Code* code = candidate->code();
833 MarkBit code_mark = Marking::MarkBitFrom(code);
834 if (!code_mark.Get()) {
835 candidate->set_code(lazy_compile);
838 RecordSharedFunctionInfoCodeSlot(candidate);
840 candidate = next_candidate;
843 shared_function_info_candidates_head_ = NULL;
846 void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
847 Object** slot = HeapObject::RawField(shared,
848 SharedFunctionInfo::kCodeOffset);
849 isolate_->heap()->mark_compact_collector()->
850 RecordSlot(slot, slot, HeapObject::cast(*slot));
853 static JSFunction** GetNextCandidateField(JSFunction* candidate) {
854 return reinterpret_cast<JSFunction**>(
855 candidate->address() + JSFunction::kCodeEntryOffset);
858 static JSFunction* GetNextCandidate(JSFunction* candidate) {
859 return *GetNextCandidateField(candidate);
862 static void SetNextCandidate(JSFunction* candidate,
863 JSFunction* next_candidate) {
864 *GetNextCandidateField(candidate) = next_candidate;
867 static SharedFunctionInfo** GetNextCandidateField(
868 SharedFunctionInfo* candidate) {
869 Code* code = candidate->code();
870 return reinterpret_cast<SharedFunctionInfo**>(
871 code->address() + Code::kGCMetadataOffset);
874 static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
875 return reinterpret_cast<SharedFunctionInfo*>(
876 candidate->code()->gc_metadata());
879 static void SetNextCandidate(SharedFunctionInfo* candidate,
880 SharedFunctionInfo* next_candidate) {
881 candidate->code()->set_gc_metadata(next_candidate);
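// Note on the layout above: the candidate lists are threaded through fields
// that are otherwise unused during GC rather than through side storage. The
// JSFunction list reuses the code-entry field (JSFunction::kCodeEntryOffset),
// and the SharedFunctionInfo list reuses the gc_metadata slot of the
// candidate's Code object; ProcessCandidates() later walks both lists.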
885 JSFunction* jsfunction_candidates_head_;
886 SharedFunctionInfo* shared_function_info_candidates_head_;
888 DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
892 MarkCompactCollector::~MarkCompactCollector() {
893 if (code_flusher_ != NULL) {
894 delete code_flusher_;
895 code_flusher_ = NULL;
900 static inline HeapObject* ShortCircuitConsString(Object** p) {
901 // Optimization: If the heap object pointed to by p is a non-symbol
902 // cons string whose right substring is HEAP->empty_string, update
903 // it in place to its left substring. Return the updated value.
905 // Here we assume that if we change *p, we replace it with a heap object
906 // (i.e., the left substring of a cons string is always a heap object).
908 // The check performed is:
909 // object->IsConsString() && !object->IsSymbol() &&
910 // (ConsString::cast(object)->second() == HEAP->empty_string())
911 // except the maps for the object and its possible substrings might be
912 // marked.
913 HeapObject* object = HeapObject::cast(*p);
914 if (!FLAG_clever_optimizations) return object;
915 Map* map = object->map();
916 InstanceType type = map->instance_type();
917 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
919 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
920 Heap* heap = map->GetHeap();
921 if (second != heap->empty_string()) {
925 // Since we don't have the object's start, it is impossible to update the
926 // page dirty marks. Therefore, we only replace the string with its left
927 // substring when page dirty marks do not change.
928 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
929 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
932 return HeapObject::cast(first);
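// For illustration: a cons string whose second part is the empty string, e.g.
// cons(first = "abc", second = ""), passes the checks above, so *p is updated
// in place to point directly at "abc"; anything else is returned unchanged.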
936 class StaticMarkingVisitor : public StaticVisitorBase {
938 static inline void IterateBody(Map* map, HeapObject* obj) {
939 table_.GetVisitor(map)(map, obj);
942 static void Initialize() {
943 table_.Register(kVisitShortcutCandidate,
944 &FixedBodyVisitor<StaticMarkingVisitor,
945 ConsString::BodyDescriptor,
948 table_.Register(kVisitConsString,
949 &FixedBodyVisitor<StaticMarkingVisitor,
950 ConsString::BodyDescriptor,
953 table_.Register(kVisitSlicedString,
954 &FixedBodyVisitor<StaticMarkingVisitor,
955 SlicedString::BodyDescriptor,
958 table_.Register(kVisitFixedArray,
959 &FlexibleBodyVisitor<StaticMarkingVisitor,
960 FixedArray::BodyDescriptor,
963 table_.Register(kVisitGlobalContext, &VisitGlobalContext);
965 table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);
967 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
968 table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
969 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
970 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
972 table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);
974 table_.Register(kVisitOddball,
975 &FixedBodyVisitor<StaticMarkingVisitor,
976 Oddball::BodyDescriptor,
978 table_.Register(kVisitMap,
979 &FixedBodyVisitor<StaticMarkingVisitor,
983 table_.Register(kVisitCode, &VisitCode);
985 table_.Register(kVisitSharedFunctionInfo,
986 &VisitSharedFunctionInfoAndFlushCode);
988 table_.Register(kVisitJSFunction,
989 &VisitJSFunctionAndFlushCode);
991 table_.Register(kVisitJSRegExp,
992 &VisitRegExpAndFlushCode);
994 table_.Register(kVisitPropertyCell,
995 &FixedBodyVisitor<StaticMarkingVisitor,
996 JSGlobalPropertyCell::BodyDescriptor,
999 table_.RegisterSpecializations<DataObjectVisitor,
1001 kVisitDataObjectGeneric>();
1003 table_.RegisterSpecializations<JSObjectVisitor,
1005 kVisitJSObjectGeneric>();
1007 table_.RegisterSpecializations<StructObjectVisitor,
1009 kVisitStructGeneric>();
1012 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
1013 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
1016 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
1017 // Mark all objects pointed to in [start, end).
1018 const int kMinRangeForMarkingRecursion = 64;
1019 if (end - start >= kMinRangeForMarkingRecursion) {
1020 if (VisitUnmarkedObjects(heap, start, end)) return;
1021 // We are close to a stack overflow, so just mark the objects.
1023 MarkCompactCollector* collector = heap->mark_compact_collector();
1024 for (Object** p = start; p < end; p++) {
1025 MarkObjectByPointer(collector, start, p);
1029 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
1030 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
1031 JSGlobalPropertyCell* cell =
1032 JSGlobalPropertyCell::cast(rinfo->target_cell());
1033 MarkBit mark = Marking::MarkBitFrom(cell);
1034 heap->mark_compact_collector()->MarkObject(cell, mark);
1037 static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
1038 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1039 // TODO(mstarzinger): We do not short-circuit cons strings here, verify
1040 // that there can be no such embedded pointers and add assertion here.
1041 HeapObject* object = HeapObject::cast(rinfo->target_object());
1042 heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
1043 MarkBit mark = Marking::MarkBitFrom(object);
1044 heap->mark_compact_collector()->MarkObject(object, mark);
1047 static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
1048 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1049 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1050 if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
1051 && (target->ic_state() == MEGAMORPHIC ||
1052 heap->mark_compact_collector()->flush_monomorphic_ics_ ||
1053 target->ic_age() != heap->global_ic_age())) {
1054 IC::Clear(rinfo->pc());
1055 target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1057 MarkBit code_mark = Marking::MarkBitFrom(target);
1058 heap->mark_compact_collector()->MarkObject(target, code_mark);
1059 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
1062 static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
1063 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1064 rinfo->IsPatchedReturnSequence()) ||
1065 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1066 rinfo->IsPatchedDebugBreakSlotSequence()));
1067 Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1068 MarkBit code_mark = Marking::MarkBitFrom(target);
1069 heap->mark_compact_collector()->MarkObject(target, code_mark);
1070 heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
1073 // Mark object pointed to by p.
1074 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1075 Object** anchor_slot,
1077 if (!(*p)->IsHeapObject()) return;
1078 HeapObject* object = ShortCircuitConsString(p);
1079 collector->RecordSlot(anchor_slot, p, object);
1080 MarkBit mark = Marking::MarkBitFrom(object);
1081 collector->MarkObject(object, mark);
1085 // Visit an unmarked object.
1086 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1089 ASSERT(Isolate::Current()->heap()->Contains(obj));
1090 ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
1092 Map* map = obj->map();
1093 Heap* heap = obj->GetHeap();
1094 MarkBit mark = Marking::MarkBitFrom(obj);
1095 heap->mark_compact_collector()->SetMark(obj, mark);
1096 // Mark the map pointer and the body.
1097 MarkBit map_mark = Marking::MarkBitFrom(map);
1098 heap->mark_compact_collector()->MarkObject(map, map_mark);
1099 IterateBody(map, obj);
1102 // Visit all unmarked objects pointed to by [start, end).
1103 // Returns false if the operation fails (lack of stack space).
1104 static inline bool VisitUnmarkedObjects(Heap* heap,
1107 // Return false if we are close to the stack limit.
1108 StackLimitCheck check(heap->isolate());
1109 if (check.HasOverflowed()) return false;
1111 MarkCompactCollector* collector = heap->mark_compact_collector();
1112 // Visit the unmarked objects.
1113 for (Object** p = start; p < end; p++) {
1115 if (!o->IsHeapObject()) continue;
1116 collector->RecordSlot(start, p, o);
1117 HeapObject* obj = HeapObject::cast(o);
1118 MarkBit mark = Marking::MarkBitFrom(obj);
1119 if (mark.Get()) continue;
1120 VisitUnmarkedObject(collector, obj);
1125 static inline void VisitExternalReference(Address* p) { }
1126 static inline void VisitExternalReference(RelocInfo* rinfo) { }
1127 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
1130 class DataObjectVisitor {
1133 static void VisitSpecialized(Map* map, HeapObject* object) {
1136 static void Visit(Map* map, HeapObject* object) {
1140 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
1141 JSObject::BodyDescriptor,
1142 void> JSObjectVisitor;
1144 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
1145 StructBodyDescriptor,
1146 void> StructObjectVisitor;
1148 static void VisitJSWeakMap(Map* map, HeapObject* object) {
1149 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1150 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);
1152 // Enqueue weak map in linked list of encountered weak maps.
1153 ASSERT(weak_map->next() == Smi::FromInt(0));
1154 weak_map->set_next(collector->encountered_weak_maps());
1155 collector->set_encountered_weak_maps(weak_map);
1157 // Skip visiting the backing hash table containing the mappings.
1158 int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
1159 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
1162 JSWeakMap::BodyDescriptor::kStartOffset,
1163 JSWeakMap::kTableOffset);
1164 BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
1167 JSWeakMap::kTableOffset + kPointerSize,
1170 // Mark the backing hash table without pushing it on the marking stack.
1171 ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
1172 ASSERT(!MarkCompactCollector::IsMarked(table));
1173 collector->SetMark(table, Marking::MarkBitFrom(table));
1174 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1175 ASSERT(MarkCompactCollector::IsMarked(table->map()));
1178 static void VisitCode(Map* map, HeapObject* object) {
1179 Heap* heap = map->GetHeap();
1180 Code* code = reinterpret_cast<Code*>(object);
1181 if (FLAG_cleanup_code_caches_at_gc) {
1182 Object* raw_info = code->type_feedback_info();
1183 if (raw_info->IsTypeFeedbackInfo()) {
1184 TypeFeedbackCells* type_feedback_cells =
1185 TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
1186 for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
1187 ASSERT(type_feedback_cells->AstId(i)->IsSmi());
1188 JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
1189 cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
1193 code->CodeIterateBody<StaticMarkingVisitor>(heap);
1196 // Code flushing support.
1198 // How many collections a newly compiled code object will survive before
1199 // being flushed.
1200 static const int kCodeAgeThreshold = 5;
1202 static const int kRegExpCodeThreshold = 5;
1204 inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
1205 Object* undefined = heap->undefined_value();
1206 return (info->script() != undefined) &&
1207 (reinterpret_cast<Script*>(info->script())->source() != undefined);
1211 inline static bool IsCompiled(JSFunction* function) {
1212 return function->code() !=
1213 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
1216 inline static bool IsCompiled(SharedFunctionInfo* function) {
1217 return function->code() !=
1218 function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
1221 inline static bool IsFlushable(Heap* heap, JSFunction* function) {
1222 SharedFunctionInfo* shared_info = function->unchecked_shared();
1224 // Code is either on stack, in compilation cache or referenced
1225 // by optimized version of function.
1226 MarkBit code_mark = Marking::MarkBitFrom(function->code());
1227 if (code_mark.Get()) {
1228 if (!Marking::MarkBitFrom(shared_info).Get()) {
1229 shared_info->set_code_age(0);
1234 // We do not flush code for optimized functions.
1235 if (function->code() != shared_info->code()) {
1239 return IsFlushable(heap, shared_info);
1242 inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
1243 // Code is either on stack, in compilation cache or referenced
1244 // by optimized version of function.
1246 Marking::MarkBitFrom(shared_info->code());
1247 if (code_mark.Get()) {
1251 // The function must be compiled and have the source code available,
1252 // to be able to recompile it in case we need the function again.
1253 if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
1257 // We never flush code for Api functions.
1258 Object* function_data = shared_info->function_data();
1259 if (function_data->IsFunctionTemplateInfo()) {
1263 // Only flush code for functions.
1264 if (shared_info->code()->kind() != Code::FUNCTION) {
1268 // Function must be lazy compilable.
1269 if (!shared_info->allows_lazy_compilation()) {
1273 // If this is a full script wrapped in a function, we do not flush the code.
1274 if (shared_info->is_toplevel()) {
1278 // Age this shared function info.
1279 if (shared_info->code_age() < kCodeAgeThreshold) {
1280 shared_info->set_code_age(shared_info->code_age() + 1);
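// In effect, a SharedFunctionInfo has to age through kCodeAgeThreshold
// collections before it is actually enqueued as a flushing candidate.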
1288 static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
1289 if (!IsFlushable(heap, function)) return false;
1291 // This function's code looks flushable. But we have to postpone the
1292 // decision until we see all functions that point to the same
1293 // SharedFunctionInfo because some of them might be optimized.
1294 // That would make the nonoptimized version of the code nonflushable,
1295 // because it is required for bailing out from optimized code.
1296 heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
1300 static inline bool IsValidNotBuiltinContext(Object* ctx) {
1301 return ctx->IsContext() &&
1302 !Context::cast(ctx)->global()->IsJSBuiltinsObject();
1306 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
1307 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1309 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
1311 FixedBodyVisitor<StaticMarkingVisitor,
1312 SharedFunctionInfo::BodyDescriptor,
1313 void>::Visit(map, object);
1317 static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1320 // Make sure that the fixed array is in fact initialized on the RegExp.
1321 // We could potentially trigger a GC when initializing the RegExp.
1322 if (HeapObject::cast(re->data())->map()->instance_type() !=
1323 FIXED_ARRAY_TYPE) return;
1325 // Make sure this is a RegExp that actually contains code.
1326 if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;
1328 Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
1329 if (!code->IsSmi() &&
1330 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1331 // Save a copy that can be reinstated if we need the code again.
1332 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1336 // Saving a copy might create a pointer into a compaction candidate
1337 // that was not observed by the marker. This might happen if the JSRegExp
1338 // data was marked through the compilation cache before the marker
1339 // reached the JSRegExp object.
1340 FixedArray* data = FixedArray::cast(re->data());
1341 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1342 heap->mark_compact_collector()->
1343 RecordSlot(slot, slot, code);
1345 // Set a number in the 0-255 range to guarantee no smi overflow.
1346 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1347 Smi::FromInt(heap->sweep_generation() & 0xff),
1349 } else if (code->IsSmi()) {
1350 int value = Smi::cast(code)->value();
1351 // The regexp has not been compiled yet or there was a compilation error.
1352 if (value == JSRegExp::kUninitializedValue ||
1353 value == JSRegExp::kCompilationErrorValue) {
1357 // Check if we should flush now.
1358 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
1359 re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
1360 Smi::FromInt(JSRegExp::kUninitializedValue),
1362 re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
1363 Smi::FromInt(JSRegExp::kUninitializedValue),
1370 // Works by setting the current sweep_generation (as a Smi) in the code
1371 // object slot of the RegExp's data array, and keeps a copy around that can
1372 // be reinstated if we reuse the RegExp before flushing.
1373 // If the code was not used for kRegExpCodeThreshold mark-sweep GCs,
1374 // we flush the code.
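// Illustrative walk-through (made-up numbers): if sweep_generation is 42 when
// the marker first visits a compiled IRREGEXP, the code slot is replaced by
// Smi(42) and the real Code object is parked in the saved-code slot. If the
// RegExp is used again the saved code can be reinstated; otherwise, once the
// generation check in UpdateRegExpCodeAgeAndFlush matches
// (42 + kRegExpCodeThreshold), both slots are reset to kUninitializedValue and
// the code can be collected.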
1375 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1376 Heap* heap = map->GetHeap();
1377 MarkCompactCollector* collector = heap->mark_compact_collector();
1378 if (!collector->is_code_flushing_enabled()) {
1379 VisitJSRegExpFields(map, object);
1382 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1383 // Flush code or set age on both ASCII and two byte code.
1384 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1385 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1386 // Visit the fields of the RegExp, including the updated FixedArray.
1387 VisitJSRegExpFields(map, object);
1391 static void VisitSharedFunctionInfoAndFlushCode(Map* map,
1392 HeapObject* object) {
1393 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1394 if (!collector->is_code_flushing_enabled()) {
1395 VisitSharedFunctionInfoGeneric(map, object);
1398 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
1402 static void VisitSharedFunctionInfoAndFlushCodeGeneric(
1403 Map* map, HeapObject* object, bool known_flush_code_candidate) {
1404 Heap* heap = map->GetHeap();
1405 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
1407 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
1409 if (shared->ic_age() != heap->global_ic_age()) {
1410 shared->ResetForNewContext(heap->global_ic_age());
1413 if (!known_flush_code_candidate) {
1414 known_flush_code_candidate = IsFlushable(heap, shared);
1415 if (known_flush_code_candidate) {
1416 heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
1420 VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
1424 static void VisitCodeEntry(Heap* heap, Address entry_address) {
1425 Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
1426 MarkBit mark = Marking::MarkBitFrom(code);
1427 heap->mark_compact_collector()->MarkObject(code, mark);
1428 heap->mark_compact_collector()->
1429 RecordCodeEntrySlot(entry_address, code);
1432 static void VisitGlobalContext(Map* map, HeapObject* object) {
1433 FixedBodyVisitor<StaticMarkingVisitor,
1434 Context::MarkCompactBodyDescriptor,
1435 void>::Visit(map, object);
1437 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
1438 for (int idx = Context::FIRST_WEAK_SLOT;
1439 idx < Context::GLOBAL_CONTEXT_SLOTS;
1442 HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
1443 collector->RecordSlot(slot, slot, *slot);
1447 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
1448 Heap* heap = map->GetHeap();
1449 MarkCompactCollector* collector = heap->mark_compact_collector();
1450 if (!collector->is_code_flushing_enabled()) {
1451 VisitJSFunction(map, object);
1455 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
1456 // The function must have a valid context and not be a builtin.
1457 bool flush_code_candidate = false;
1458 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
1459 flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
1462 if (!flush_code_candidate) {
1463 Code* code = jsfunction->shared()->code();
1464 MarkBit code_mark = Marking::MarkBitFrom(code);
1465 collector->MarkObject(code, code_mark);
1467 if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
1468 collector->MarkInlinedFunctionsCode(jsfunction->code());
1472 VisitJSFunctionFields(map,
1473 reinterpret_cast<JSFunction*>(object),
1474 flush_code_candidate);
1478 static void VisitJSFunction(Map* map, HeapObject* object) {
1479 VisitJSFunctionFields(map,
1480 reinterpret_cast<JSFunction*>(object),
1485 #define SLOT_ADDR(obj, offset) \
1486 reinterpret_cast<Object**>((obj)->address() + offset)
1489 static inline void VisitJSFunctionFields(Map* map,
1491 bool flush_code_candidate) {
1492 Heap* heap = map->GetHeap();
1495 HeapObject::RawField(object, JSFunction::kPropertiesOffset),
1496 HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
1498 if (!flush_code_candidate) {
1499 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
1501 // Don't visit code object.
1503 // Visit the shared function info to avoid double checking of its code.
1505 SharedFunctionInfo* shared_info = object->unchecked_shared();
1506 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
1507 if (!shared_info_mark.Get()) {
1508 Map* shared_info_map = shared_info->map();
1509 MarkBit shared_info_map_mark =
1510 Marking::MarkBitFrom(shared_info_map);
1511 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
1512 heap->mark_compact_collector()->MarkObject(shared_info_map,
1513 shared_info_map_mark);
1514 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
1522 HeapObject::RawField(object,
1523 JSFunction::kCodeEntryOffset + kPointerSize),
1524 HeapObject::RawField(object,
1525 JSFunction::kNonWeakFieldsEndOffset));
1527 // Don't visit the next function list field as it is a weak reference.
1528 Object** next_function =
1529 HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
1530 heap->mark_compact_collector()->RecordSlot(
1531 next_function, next_function, *next_function);
1534 static inline void VisitJSRegExpFields(Map* map,
1535 HeapObject* object) {
1536 int last_property_offset =
1537 JSRegExp::kSize + kPointerSize * map->inobject_properties();
1538 VisitPointers(map->GetHeap(),
1539 SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
1540 SLOT_ADDR(object, last_property_offset));
1544 static void VisitSharedFunctionInfoFields(Heap* heap,
1546 bool flush_code_candidate) {
1547 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
1549 if (!flush_code_candidate) {
1550 VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
1554 SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
1555 SLOT_ADDR(object, SharedFunctionInfo::kSize));
1560 typedef void (*Callback)(Map* map, HeapObject* object);
1562 static VisitorDispatchTable<Callback> table_;
1566 VisitorDispatchTable<StaticMarkingVisitor::Callback>
1567 StaticMarkingVisitor::table_;
1570 class MarkingVisitor : public ObjectVisitor {
1572 explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
1574 void VisitPointer(Object** p) {
1575 StaticMarkingVisitor::VisitPointer(heap_, p);
1578 void VisitPointers(Object** start, Object** end) {
1579 StaticMarkingVisitor::VisitPointers(heap_, start, end);
1587 class CodeMarkingVisitor : public ThreadVisitor {
1589 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1590 : collector_(collector) {}
1592 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1593 collector_->PrepareThreadForCodeFlushing(isolate, top);
1597 MarkCompactCollector* collector_;
1601 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1603 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1604 : collector_(collector) {}
1606 void VisitPointers(Object** start, Object** end) {
1607 for (Object** p = start; p < end; p++) VisitPointer(p);
1610 void VisitPointer(Object** slot) {
1611 Object* obj = *slot;
1612 if (obj->IsSharedFunctionInfo()) {
1613 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1614 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1615 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1616 collector_->MarkObject(shared->code(), code_mark);
1617 collector_->MarkObject(shared, shared_mark);
1622 MarkCompactCollector* collector_;
1626 void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
1627 // For optimized functions we should retain both the non-optimized version
1628 // of the code and the non-optimized versions of all inlined functions.
1629 // This is required to support bailing out from inlined code.
1630 DeoptimizationInputData* data =
1631 DeoptimizationInputData::cast(code->deoptimization_data());
1633 FixedArray* literals = data->LiteralArray();
1635 for (int i = 0, count = data->InlinedFunctionCount()->value();
1638 JSFunction* inlined = JSFunction::cast(literals->get(i));
1639 Code* inlined_code = inlined->shared()->code();
1640 MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
1641 MarkObject(inlined_code, inlined_code_mark);
1646 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1647 ThreadLocalTop* top) {
1648 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1649 // Note: for a frame that has a pending lazy deoptimization,
1650 // StackFrame::unchecked_code will return a non-optimized code object for
1651 // the outermost function, while StackFrame::LookupCode will return the
1652 // actual optimized code object.
1653 StackFrame* frame = it.frame();
1654 Code* code = frame->unchecked_code();
1655 MarkBit code_mark = Marking::MarkBitFrom(code);
1656 MarkObject(code, code_mark);
1657 if (frame->is_optimized()) {
1658 MarkInlinedFunctionsCode(frame->LookupCode());
1664 void MarkCompactCollector::PrepareForCodeFlushing() {
1665 ASSERT(heap() == Isolate::Current()->heap());
1667 // TODO(1609) Currently incremental marker does not support code flushing.
1668 if (!FLAG_flush_code || was_marked_incrementally_) {
1669 EnableCodeFlushing(false);
1673 #ifdef ENABLE_DEBUGGER_SUPPORT
1674 if (heap()->isolate()->debug()->IsLoaded() ||
1675 heap()->isolate()->debug()->has_break_points()) {
1676 EnableCodeFlushing(false);
1681 EnableCodeFlushing(true);
1683 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1684 // relies on it being marked before any other descriptor array.
1685 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1686 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1687 MarkObject(descriptor_array, descriptor_array_mark);
1689 // Make sure we are not referencing the code from the stack.
1690 ASSERT(this == heap()->mark_compact_collector());
1691 PrepareThreadForCodeFlushing(heap()->isolate(),
1692 heap()->isolate()->thread_local_top());
1694 // Iterate the archived stacks in all threads to check if
1695 // the code is referenced.
1696 CodeMarkingVisitor code_marking_visitor(this);
1697 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1698 &code_marking_visitor);
1700 SharedFunctionInfoMarkingVisitor visitor(this);
1701 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1702 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1704 ProcessMarkingDeque();
1708 // Visitor class for marking heap roots.
1709 class RootMarkingVisitor : public ObjectVisitor {
1711 explicit RootMarkingVisitor(Heap* heap)
1712 : collector_(heap->mark_compact_collector()) { }
1714 void VisitPointer(Object** p) {
1715 MarkObjectByPointer(p);
1718 void VisitPointers(Object** start, Object** end) {
1719 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1723 void MarkObjectByPointer(Object** p) {
1724 if (!(*p)->IsHeapObject()) return;
1726 // Replace flat cons strings in place.
1727 HeapObject* object = ShortCircuitConsString(p);
1728 MarkBit mark_bit = Marking::MarkBitFrom(object);
1729 if (mark_bit.Get()) return;
1731 Map* map = object->map();
1733 collector_->SetMark(object, mark_bit);
1735 // Mark the map pointer and body, and push them on the marking stack.
1736 MarkBit map_mark = Marking::MarkBitFrom(map);
1737 collector_->MarkObject(map, map_mark);
1738 StaticMarkingVisitor::IterateBody(map, object);
1740 // Mark all the objects reachable from the map and body. May leave
1741 // overflowed objects in the heap.
1742 collector_->EmptyMarkingDeque();
1745 MarkCompactCollector* collector_;
1749 // Helper class for pruning the symbol table.
1750 class SymbolTableCleaner : public ObjectVisitor {
1752 explicit SymbolTableCleaner(Heap* heap)
1753 : heap_(heap), pointers_removed_(0) { }
1755 virtual void VisitPointers(Object** start, Object** end) {
1756 // Visit all HeapObject pointers in [start, end).
1757 for (Object** p = start; p < end; p++) {
1759 if (o->IsHeapObject() &&
1760 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
1761 // Check if the symbol being pruned is an external symbol. We need to
1762 // delete the associated external data as this symbol is going away.
1764 // Since no objects have yet been moved, we can safely access the map of
1765 // the object.
1766 if (o->IsExternalString() ||
1767 (o->IsHeapObject() && HeapObject::cast(o)->map()->has_external_resource())) {
1768 heap_->FinalizeExternalString(HeapObject::cast(*p));
1770 // Set the entry to the_hole_value (as deleted).
1771 *p = heap_->the_hole_value();
1772 pointers_removed_++;
1777 int PointersRemoved() {
1778 return pointers_removed_;
1783 int pointers_removed_;
1787 // Implementation of WeakObjectRetainer for mark compact GCs. All marked
1788 // objects are retained.
1789 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1791 virtual Object* RetainAs(Object* object) {
1792 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
1801 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
1802 ASSERT(IsMarked(object));
1803 ASSERT(HEAP->Contains(object));
1804 if (object->IsMap()) {
1805 Map* map = Map::cast(object);
1806 heap_->ClearCacheOnMap(map);
1808 // When map collection is enabled we have to mark through the map's
1809 // transitions in a special way to make transition links weak.
1810 // Only maps for subclasses of JSReceiver can have transitions.
1811 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1812 if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1813 MarkMapContents(map);
1815 marking_deque_.PushBlack(map);
1818 marking_deque_.PushBlack(object);
1823 void MarkCompactCollector::MarkMapContents(Map* map) {
1824 // Mark the prototype transitions array but don't push it onto the marking stack.
1825 // This will make references from it weak. We will clean dead prototype
1826 // transitions in ClearNonLiveTransitions.
1827 FixedArray* prototype_transitions = map->prototype_transitions();
1828 MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
1831 MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
1832 prototype_transitions->Size());
1835 Object** raw_descriptor_array_slot =
1836 HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
1837 Object* raw_descriptor_array = *raw_descriptor_array_slot;
1838 if (!raw_descriptor_array->IsSmi()) {
1839 MarkDescriptorArray(
1840 reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
1843 // Mark the Object* fields of the Map.
1844 // Since the descriptor array has been marked already, it is fine
1845 // that one of these fields contains a pointer to it.
1846 Object** start_slot = HeapObject::RawField(map,
1847 Map::kPointerFieldsBeginOffset);
1849 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1851 StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
1855 void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
1857 Object** slot = HeapObject::RawField(accessors, offset);
1858 HeapObject* accessor = HeapObject::cast(*slot);
1859 if (accessor->IsMap()) return;
1860 RecordSlot(slot, slot, accessor);
1861 MarkObjectAndPush(accessor);
1865 void MarkCompactCollector::MarkDescriptorArray(
1866 DescriptorArray* descriptors) {
1867 MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
1868 if (descriptors_mark.Get()) return;
1869 // Empty descriptor array is marked as a root before any maps are marked.
1870 ASSERT(descriptors != heap()->empty_descriptor_array());
1871 SetMark(descriptors, descriptors_mark);
1873 FixedArray* contents = reinterpret_cast<FixedArray*>(
1874 descriptors->get(DescriptorArray::kContentArrayIndex));
1875 ASSERT(contents->IsHeapObject());
1876 ASSERT(!IsMarked(contents));
1877 ASSERT(contents->IsFixedArray());
1878 ASSERT(contents->length() >= 2);
1879 MarkBit contents_mark = Marking::MarkBitFrom(contents);
1880 SetMark(contents, contents_mark);
1881 // Contents contains (value, details) pairs. If the details say that the type
1882 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1883 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1884 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1885 // CONSTANT_TRANSITION is the value an Object* (a Map*).
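// For example (see the switch below): a CONSTANT_FUNCTION value is marked and
// pushed, so the function survives; a MAP_TRANSITION, CONSTANT_TRANSITION or
// NULL_DESCRIPTOR value is skipped, so a transition target that is not live
// for some other reason can be cleared later in ClearNonLiveTransitions; an
// ELEMENTS_TRANSITION FixedArray is kept alive without being pushed, so the
// maps it lists remain only weakly referenced.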
1886 for (int i = 0; i < contents->length(); i += 2) {
1887 // If the pair (value, details) at index i, i+1 is not
1888 // a transition or null descriptor, mark the value.
1889 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1891 Object** slot = contents->data_start() + i;
1892 if (!(*slot)->IsHeapObject()) continue;
1893 HeapObject* value = HeapObject::cast(*slot);
1895 RecordSlot(slot, slot, *slot);
1897 switch (details.type()) {
1900 case CONSTANT_FUNCTION:
1903 MarkObjectAndPush(value);
1906 if (!value->IsAccessorPair()) {
1907 MarkObjectAndPush(value);
1908 } else if (!MarkObjectWithoutPush(value)) {
1909 MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
1910 MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
1913 case ELEMENTS_TRANSITION:
1914 // For maps with multiple elements transitions, the transition maps are
1915 // stored in a FixedArray. Keep the fixed array alive but not the maps
1916 // that it refers to.
1917 if (value->IsFixedArray()) MarkObjectWithoutPush(value);
1919 case MAP_TRANSITION:
1920 case CONSTANT_TRANSITION:
1921 case NULL_DESCRIPTOR:
1925 // The DescriptorArray descriptors contains a pointer to its contents array,
1926 // but the contents array is already marked.
1927 marking_deque_.PushBlack(descriptors);
1931 void MarkCompactCollector::CreateBackPointers() {
1932 HeapObjectIterator iterator(heap()->map_space());
1933 for (HeapObject* next_object = iterator.Next();
1934 next_object != NULL; next_object = iterator.Next()) {
1935 if (next_object->IsMap()) { // Could also be FreeSpace object on free list.
1936 Map* map = Map::cast(next_object);
1937 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1938 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
1939 map->CreateBackPointers();
1941 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
1948 // Fill the marking stack with overflowed objects returned by the given
1949 // iterator. Stop when the marking stack is filled or the end of the space
1950 // is reached, whichever comes first.
1952 static void DiscoverGreyObjectsWithIterator(Heap* heap,
1953 MarkingDeque* marking_deque,
1955 // The caller should ensure that the marking stack is initially not full,
1956 // so that we don't waste effort pointlessly scanning for objects.
1957 ASSERT(!marking_deque->IsFull());
1959 Map* filler_map = heap->one_pointer_filler_map();
1960 for (HeapObject* object = it->Next();
1962 object = it->Next()) {
1963 MarkBit markbit = Marking::MarkBitFrom(object);
1964 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1965 Marking::GreyToBlack(markbit);
1966 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
1967 marking_deque->PushBlack(object);
1968 if (marking_deque->IsFull()) return;
1974 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1977 static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
1978 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1979 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1980 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1981 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1983 MarkBit::CellType* cells = p->markbits()->cells();
1985 int last_cell_index =
1986 Bitmap::IndexToCell(
1987 Bitmap::CellAlignIndex(
1988 p->AddressToMarkbitIndex(p->area_end())));
1990 Address cell_base = p->area_start();
1991 int cell_index = Bitmap::IndexToCell(
1992 Bitmap::CellAlignIndex(
1993 p->AddressToMarkbitIndex(cell_base)));
1997 cell_index < last_cell_index;
1998 cell_index++, cell_base += 32 * kPointerSize) {
1999 ASSERT((unsigned)cell_index ==
2000 Bitmap::IndexToCell(
2001 Bitmap::CellAlignIndex(
2002 p->AddressToMarkbitIndex(cell_base))));
2004 const MarkBit::CellType current_cell = cells[cell_index];
2005 if (current_cell == 0) continue;
2007 const MarkBit::CellType next_cell = cells[cell_index + 1];
2008 MarkBit::CellType grey_objects = current_cell &
2009 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
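// Worked example (illustrative): a grey object has the bit pattern "11", i.e.
// its mark bit and the following bit are both set. If current_cell is 0x30
// (bits 4 and 5 set), then (current_cell >> 1) is 0x18 and the AND yields
// 0x10: bit 4 survives, identifying a grey object at word offset 4. A black
// object ("10") sets only bit 4, so the AND yields 0 for it. Bit 31 needs its
// neighbour from the next cell, which is why the low bit of next_cell is
// shifted in from the left.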
2012 while (grey_objects != 0) {
2013 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
2014 grey_objects >>= trailing_zeros;
2015 offset += trailing_zeros;
2016 MarkBit markbit(&cells[cell_index], 1 << offset, false);
2017 ASSERT(Marking::IsGrey(markbit));
2018 Marking::GreyToBlack(markbit);
2019 Address addr = cell_base + offset * kPointerSize;
2020 HeapObject* object = HeapObject::FromAddress(addr);
2021 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
2022 marking_deque->PushBlack(object);
2023 if (marking_deque->IsFull()) return;
2028 grey_objects >>= (Bitmap::kBitsPerCell - 1);
2033 static void DiscoverGreyObjectsInSpace(Heap* heap,
2034 MarkingDeque* marking_deque,
2035 PagedSpace* space) {
2036 if (!space->was_swept_conservatively()) {
2037 HeapObjectIterator it(space);
2038 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2040 PageIterator it(space);
2041 while (it.has_next()) {
2042 Page* p = it.next();
2043 DiscoverGreyObjectsOnPage(marking_deque, p);
2044 if (marking_deque->IsFull()) return;
2050 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
2052 if (!o->IsHeapObject()) return false;
2053 HeapObject* heap_object = HeapObject::cast(o);
2054 MarkBit mark = Marking::MarkBitFrom(heap_object);
2059 void MarkCompactCollector::MarkSymbolTable() {
2060 SymbolTable* symbol_table = heap()->symbol_table();
2061 // Mark the symbol table itself.
2062 MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table);
2063 SetMark(symbol_table, symbol_table_mark);
2064 // Explicitly mark the prefix.
2065 MarkingVisitor marker(heap());
2066 symbol_table->IteratePrefix(&marker);
2067 ProcessMarkingDeque();
2071 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2072 // Mark the heap roots including global variables, stack variables,
2073 // etc., and all objects reachable from them.
2074 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2076 // Handle the symbol table specially.
2079 // There may be overflowed objects in the heap. Visit them now.
2080 while (marking_deque_.overflowed()) {
2081 RefillMarkingDeque();
2082 EmptyMarkingDeque();
2087 void MarkCompactCollector::MarkObjectGroups() {
2088 List<ObjectGroup*>* object_groups =
2089 heap()->isolate()->global_handles()->object_groups();
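// The loop below filters the list in place: a group with no marked member yet
// is kept for a later pass by copying it forward to the running index 'last',
// while a group that already contains a marked object has all of its members
// greyed and its slot cleared to NULL; the list is then truncated with
// Rewind(last).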
2092 for (int i = 0; i < object_groups->length(); i++) {
2093 ObjectGroup* entry = object_groups->at(i);
2094 ASSERT(entry != NULL);
2096 Object*** objects = entry->objects_;
2097 bool group_marked = false;
2098 for (size_t j = 0; j < entry->length_; j++) {
2099 Object* object = *objects[j];
2100 if (object->IsHeapObject()) {
2101 HeapObject* heap_object = HeapObject::cast(object);
2102 MarkBit mark = Marking::MarkBitFrom(heap_object);
2104 group_marked = true;
2110 if (!group_marked) {
2111 (*object_groups)[last++] = entry;
2115 // An object in the group is marked, so mark as grey all white heap
2116 // objects in the group.
2117 for (size_t j = 0; j < entry->length_; ++j) {
2118 Object* object = *objects[j];
2119 if (object->IsHeapObject()) {
2120 HeapObject* heap_object = HeapObject::cast(object);
2121 MarkBit mark = Marking::MarkBitFrom(heap_object);
2122 MarkObject(heap_object, mark);
2126 // Once the entire group has been colored grey, set the object group
2127 // to NULL so it won't be processed again.
2129 object_groups->at(i) = NULL;
2131 object_groups->Rewind(last);
2135 void MarkCompactCollector::MarkImplicitRefGroups() {
2136 List<ImplicitRefGroup*>* ref_groups =
2137 heap()->isolate()->global_handles()->implicit_ref_groups();
2140 for (int i = 0; i < ref_groups->length(); i++) {
2141 ImplicitRefGroup* entry = ref_groups->at(i);
2142 ASSERT(entry != NULL);
2144 if (!IsMarked(*entry->parent_)) {
2145 (*ref_groups)[last++] = entry;
2149 Object*** children = entry->children_;
2150 // A parent object is marked, so mark all child heap objects.
2151 for (size_t j = 0; j < entry->length_; ++j) {
2152 if ((*children[j])->IsHeapObject()) {
2153 HeapObject* child = HeapObject::cast(*children[j]);
2154 MarkBit mark = Marking::MarkBitFrom(child);
2155 MarkObject(child, mark);
2159 // Once the entire group has been marked, dispose it because it's
2160 // not needed anymore.
2163 ref_groups->Rewind(last);
2167 // Mark all objects reachable from the objects on the marking stack.
2168 // Before: the marking stack contains zero or more heap object pointers.
2169 // After: the marking stack is empty, and all objects reachable from the
2170 // marking stack have been marked, or are overflowed in the heap.
2171 void MarkCompactCollector::EmptyMarkingDeque() {
2172 while (!marking_deque_.IsEmpty()) {
2173 while (!marking_deque_.IsEmpty()) {
2174 HeapObject* object = marking_deque_.Pop();
2175 ASSERT(object->IsHeapObject());
2176 ASSERT(heap()->Contains(object));
2177 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2179 Map* map = object->map();
2180 MarkBit map_mark = Marking::MarkBitFrom(map);
2181 MarkObject(map, map_mark);
2183 StaticMarkingVisitor::IterateBody(map, object);
2186 // Process encountered weak maps, mark objects that are only reachable from
2187 // those weak maps, and repeat until a fixed point is reached.
2193 // Sweep the heap for overflowed objects, clear their overflow bits, and
2194 // push them on the marking stack. Stop early if the marking stack fills
2195 // before sweeping completes. If sweeping completes, there are no remaining
2196 // overflowed objects in the heap, so the overflow flag on the marking stack is cleared.
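// When a push would not fit, the deque records the overflow and the object is
// left grey in the heap's mark bitmap instead; the DiscoverGreyObjects*
// helpers above find such objects again, turn them black and re-push them, so
// marking makes progress with a bounded deque.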
2198 void MarkCompactCollector::RefillMarkingDeque() {
2199 ASSERT(marking_deque_.overflowed());
2201 SemiSpaceIterator new_it(heap()->new_space());
2202 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
2203 if (marking_deque_.IsFull()) return;
2205 DiscoverGreyObjectsInSpace(heap(),
2207 heap()->old_pointer_space());
2208 if (marking_deque_.IsFull()) return;
2210 DiscoverGreyObjectsInSpace(heap(),
2212 heap()->old_data_space());
2213 if (marking_deque_.IsFull()) return;
2215 DiscoverGreyObjectsInSpace(heap(),
2217 heap()->code_space());
2218 if (marking_deque_.IsFull()) return;
2220 DiscoverGreyObjectsInSpace(heap(),
2222 heap()->map_space());
2223 if (marking_deque_.IsFull()) return;
2225 DiscoverGreyObjectsInSpace(heap(),
2227 heap()->cell_space());
2228 if (marking_deque_.IsFull()) return;
2230 LargeObjectIterator lo_it(heap()->lo_space());
2231 DiscoverGreyObjectsWithIterator(heap(),
2234 if (marking_deque_.IsFull()) return;
2236 marking_deque_.ClearOverflowed();
2240 // Mark all objects reachable (transitively) from objects on the marking
2241 // stack. Before: the marking stack contains zero or more heap object
2242 // pointers. After: the marking stack is empty and there are no overflowed
2243 // objects in the heap.
2244 void MarkCompactCollector::ProcessMarkingDeque() {
2245 EmptyMarkingDeque();
2246 while (marking_deque_.overflowed()) {
2247 RefillMarkingDeque();
2248 EmptyMarkingDeque();
2253 void MarkCompactCollector::ProcessExternalMarking() {
2254 bool work_to_do = true;
2255 ASSERT(marking_deque_.IsEmpty());
2256 while (work_to_do) {
2258 MarkImplicitRefGroups();
2259 work_to_do = !marking_deque_.IsEmpty();
2260 ProcessMarkingDeque();
2265 void MarkCompactCollector::MarkLiveObjects() {
2266 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
2267 // The recursive GC marker detects when it is nearing stack overflow,
2268 // and switches to a different marking system. JS interrupts interfere
2269 // with the C stack limit check.
2270 PostponeInterruptsScope postpone(heap()->isolate());
2272 bool incremental_marking_overflowed = false;
2273 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2274 if (was_marked_incrementally_) {
2275 // Finalize the incremental marking and check whether we had an overflow.
2276 // Both markers use the grey color to mark overflowed objects, so the
2277 // non-incremental marker can deal with them as if the overflow had
2278 // occurred during normal marking.
2279 // But the incremental marker uses a separate marking deque,
2280 // so we have to explicitly copy its overflow state.
2281 incremental_marking->Finalize();
2282 incremental_marking_overflowed =
2283 incremental_marking->marking_deque()->overflowed();
2284 incremental_marking->marking_deque()->ClearOverflowed();
2286 // Abort any pending incremental activities e.g. incremental sweeping.
2287 incremental_marking->Abort();
2291 ASSERT(state_ == PREPARE_GC);
2292 state_ = MARK_LIVE_OBJECTS;
2294 // The to space contains live objects, a page in from space is used as a marking deque.
2296 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2297 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2298 if (FLAG_force_marking_deque_overflows) {
2299 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2301 marking_deque_.Initialize(marking_deque_start,
2303 ASSERT(!marking_deque_.overflowed());
2305 if (incremental_marking_overflowed) {
2306 // There are overflowed objects left in the heap after incremental marking.
2307 marking_deque_.SetOverflowed();
2310 PrepareForCodeFlushing();
2312 if (was_marked_incrementally_) {
2313 // There is no write barrier on cells so we have to scan them now at the end
2314 // of the incremental marking.
2316 HeapObjectIterator cell_iterator(heap()->cell_space());
2318 while ((cell = cell_iterator.Next()) != NULL) {
2319 ASSERT(cell->IsJSGlobalPropertyCell());
2320 if (IsMarked(cell)) {
2321 int offset = JSGlobalPropertyCell::kValueOffset;
2322 StaticMarkingVisitor::VisitPointer(
2324 reinterpret_cast<Object**>(cell->address() + offset));
2330 RootMarkingVisitor root_visitor(heap());
2331 MarkRoots(&root_visitor);
2333 // The objects reachable from the roots are marked, yet unreachable
2334 // objects are unmarked. Mark objects reachable due to host
2335 // application specific logic.
2336 ProcessExternalMarking();
2338 // The objects reachable from the roots or object groups are marked,
2339 // yet unreachable objects are unmarked. Mark objects reachable
2340 // only from weak global handles.
2342 // First we identify nonlive weak handles and mark them as pending
2344 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2345 &IsUnmarkedHeapObject);
2346 // Then we mark the objects and process the transitive closure.
2347 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2348 while (marking_deque_.overflowed()) {
2349 RefillMarkingDeque();
2350 EmptyMarkingDeque();
2353 // Repeat host application specific marking to mark unmarked objects
2354 // reachable from the weak roots.
2355 ProcessExternalMarking();
2361 void MarkCompactCollector::AfterMarking() {
2362 // Object literal map caches reference symbols (cache keys) and maps
2363 // (cache values). At this point still useful maps have already been
2364 // marked. Mark the keys for the alive values before we process the symbol table.
2368 // Prune the symbol table removing all symbols only pointed to by the
2369 // symbol table. Cannot use symbol_table() here because the symbol table is marked.
2371 SymbolTable* symbol_table = heap()->symbol_table();
2372 SymbolTableCleaner v(heap());
2373 symbol_table->IterateElements(&v);
2374 symbol_table->ElementsRemoved(v.PointersRemoved());
2375 heap()->external_string_table_.Iterate(&v);
2376 heap()->external_string_table_.CleanUp();
2378 // Process the weak references.
2379 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2380 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
2382 // Remove object groups after marking phase.
2383 heap()->isolate()->global_handles()->RemoveObjectGroups();
2384 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2386 // Flush code from collected candidates.
2387 if (is_code_flushing_enabled()) {
2388 code_flusher_->ProcessCandidates();
2391 if (!FLAG_watch_ic_patching) {
2392 // Clean up dead objects from the runtime profiler.
2393 heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2398 void MarkCompactCollector::ProcessMapCaches() {
2399 Object* raw_context = heap()->global_contexts_list_;
2400 while (raw_context != heap()->undefined_value()) {
2401 Context* context = reinterpret_cast<Context*>(raw_context);
2402 if (IsMarked(context)) {
2403 HeapObject* raw_map_cache =
2404 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2405 // A map cache may be reachable from the stack. In this case
2406 // it's already transitively marked and it's too late to clean it up.
2408 if (!IsMarked(raw_map_cache) &&
2409 raw_map_cache != heap()->undefined_value()) {
2410 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2411 int existing_elements = map_cache->NumberOfElements();
2412 int used_elements = 0;
2413 for (int i = MapCache::kElementsStartIndex;
2414 i < map_cache->length();
2415 i += MapCache::kEntrySize) {
2416 Object* raw_key = map_cache->get(i);
2417 if (raw_key == heap()->undefined_value() ||
2418 raw_key == heap()->the_hole_value()) continue;
2419 STATIC_ASSERT(MapCache::kEntrySize == 2);
2420 Object* raw_map = map_cache->get(i + 1);
2421 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
2424 // Delete useless entries with unmarked maps.
2425 ASSERT(raw_map->IsMap());
2426 map_cache->set_the_hole(i);
2427 map_cache->set_the_hole(i + 1);
2430 if (used_elements == 0) {
2431 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2433 // Note: we don't actually shrink the cache here to avoid
2434 // extra complexity during GC. We rely on subsequent cache
2435 // usages (EnsureCapacity) to do this.
2436 map_cache->ElementsRemoved(existing_elements - used_elements);
2437 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2438 MarkObject(map_cache, map_cache_markbit);
2442 // Move to next element in the list.
2443 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2445 ProcessMarkingDeque();
2449 void MarkCompactCollector::ReattachInitialMaps() {
2450 HeapObjectIterator map_iterator(heap()->map_space());
2451 for (HeapObject* obj = map_iterator.Next();
2453 obj = map_iterator.Next()) {
2454 if (obj->IsFreeSpace()) continue;
2455 Map* map = Map::cast(obj);
2457 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2458 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
2460 if (map->attached_to_shared_function_info()) {
2461 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2467 void MarkCompactCollector::ClearNonLiveTransitions() {
2468 HeapObjectIterator map_iterator(heap()->map_space());
2469 // Iterate over the map space, setting map transitions that go from
2470 // a marked map to an unmarked map to null transitions. At the same time,
2471 // set all the prototype fields of maps back to their original value,
2472 // dropping the back pointers temporarily stored in the prototype field.
2473 // Setting the prototype field requires following the linked list of
2474 // back pointers, reversing them all at once. This allows us to find
2475 // those maps with map transitions that need to be nulled, and only
2476 // scan the descriptor arrays of those maps, not all maps.
2477 // All of these actions are carried out only on maps of JSObjects
2478 // and related subtypes.
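// Illustrative example: suppose map C was created by a transition from map B,
// which was created by a transition from map A, and A's real prototype is the
// object P. During marking the back pointers were stored in the prototype
// fields, so C->prototype() == B and B->prototype() == A, while A->prototype()
// is still P. Starting from any of these maps, walking prototype() until the
// value is no longer a map recovers P; the walk below also restores each
// map's prototype field to P and, where a live map sits above a dead one,
// clears the now-dead transitions.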
2479 for (HeapObject* obj = map_iterator.Next();
2480 obj != NULL; obj = map_iterator.Next()) {
2481 Map* map = reinterpret_cast<Map*>(obj);
2482 MarkBit map_mark = Marking::MarkBitFrom(map);
2483 if (map->IsFreeSpace()) continue;
2485 ASSERT(map->IsMap());
2486 // Only JSObject and subtypes have map transitions and back pointers.
2487 STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE);
2488 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
2490 if (map_mark.Get() &&
2491 map->attached_to_shared_function_info()) {
2492 // This map is used for inobject slack tracking and has been detached
2493 // from SharedFunctionInfo during the mark phase.
2494 // Since it survived the GC, reattach it now.
2495 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
2498 ClearNonLivePrototypeTransitions(map);
2499 ClearNonLiveMapTransitions(map, map_mark);
2504 void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2505 int number_of_transitions = map->NumberOfProtoTransitions();
2506 FixedArray* prototype_transitions = map->prototype_transitions();
2508 int new_number_of_transitions = 0;
2509 const int header = Map::kProtoTransitionHeaderSize;
2510 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2511 const int map_offset = header + Map::kProtoTransitionMapOffset;
2512 const int step = Map::kProtoTransitionElementsPerEntry;
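// With these constants, entry i of the prototype transition array occupies
// 'step' consecutive slots after the header: the prototype is read from
// proto_offset + i * step and its cached map from map_offset + i * step.
// Entries whose prototype and map are both still marked are compacted toward
// the front of the array below, and the freed tail is refilled with undefined.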
2513 for (int i = 0; i < number_of_transitions; i++) {
2514 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2515 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2516 if (IsMarked(prototype) && IsMarked(cached_map)) {
2517 int proto_index = proto_offset + new_number_of_transitions * step;
2518 int map_index = map_offset + new_number_of_transitions * step;
2519 if (new_number_of_transitions != i) {
2520 prototype_transitions->set_unchecked(
2524 UPDATE_WRITE_BARRIER);
2525 prototype_transitions->set_unchecked(
2529 SKIP_WRITE_BARRIER);
2532 HeapObject::RawField(prototype_transitions,
2533 FixedArray::OffsetOfElementAt(proto_index));
2534 RecordSlot(slot, slot, prototype);
2535 new_number_of_transitions++;
2539 if (new_number_of_transitions != number_of_transitions) {
2540 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2543 // Fill slots that became free with undefined value.
2544 for (int i = new_number_of_transitions * step;
2545 i < number_of_transitions * step;
2547 prototype_transitions->set_undefined(heap_, header + i);
2552 void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2554 // Follow the chain of back pointers to find the prototype.
2555 Object* real_prototype = map;
2556 while (real_prototype->IsMap()) {
2557 real_prototype = Map::cast(real_prototype)->prototype();
2558 ASSERT(real_prototype->IsHeapObject());
2561 // Follow back pointers, setting them to prototype, clearing map transitions when necessary.
2564 bool current_is_alive = map_mark.Get();
2565 bool on_dead_path = !current_is_alive;
2566 while (current->IsMap()) {
2567 Object* next = current->prototype();
2568 // There should never be a dead map above a live map.
2569 ASSERT(on_dead_path || current_is_alive);
2571 // A live map above a dead map indicates a dead transition. This test will
2572 // always be false on the first iteration.
2573 if (on_dead_path && current_is_alive) {
2574 on_dead_path = false;
2575 current->ClearNonLiveTransitions(heap(), real_prototype);
2578 Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset);
2579 *slot = real_prototype;
2580 if (current_is_alive) RecordSlot(slot, slot, real_prototype);
2582 current = reinterpret_cast<Map*>(next);
2583 current_is_alive = Marking::MarkBitFrom(current).Get();
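// ProcessWeakMaps and ClearWeakMaps below both walk the weak maps encountered
// during marking. They form a singly linked list threaded through each
// JSWeakMap's next field and terminated by Smi::FromInt(0); ClearWeakMaps
// unlinks each entry and finally resets encountered_weak_maps() to that
// empty-list sentinel.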
2588 void MarkCompactCollector::ProcessWeakMaps() {
2589 Object* weak_map_obj = encountered_weak_maps();
2590 while (weak_map_obj != Smi::FromInt(0)) {
2591 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2592 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2593 ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2594 for (int i = 0; i < table->Capacity(); i++) {
2595 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2596 Object* value = table->get(table->EntryToValueIndex(i));
2597 StaticMarkingVisitor::VisitPointer(heap(), &value);
2598 table->set_unchecked(heap(),
2599 table->EntryToValueIndex(i),
2601 UPDATE_WRITE_BARRIER);
2604 weak_map_obj = weak_map->next();
2609 void MarkCompactCollector::ClearWeakMaps() {
2610 Object* weak_map_obj = encountered_weak_maps();
2611 while (weak_map_obj != Smi::FromInt(0)) {
2612 ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
2613 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
2614 ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
2615 for (int i = 0; i < table->Capacity(); i++) {
2616 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2617 table->RemoveEntry(i);
2620 weak_map_obj = weak_map->next();
2621 weak_map->set_next(Smi::FromInt(0));
2623 set_encountered_weak_maps(Smi::FromInt(0));
2627 // We scavenge new space simultaneously with sweeping. This is done in two passes.
2630 // The first pass migrates all alive objects from one semispace to another or
2631 // promotes them to old space. The forwarding address is written directly
2632 // into the first word of the object without any encoding. If an object is
2633 // dead we write NULL as a forwarding address.
2635 // The second pass updates pointers to new space in all spaces. It is possible
2636 // to encounter pointers to dead new space objects during traversal of pointers
2637 // to new space. We should clear them to avoid encountering them during the next
2638 // pointer iteration. This is an issue if the store buffer overflows and we
2639 // have to scan the entire old space, including dead objects, looking for
2640 // pointers to new space.
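// Sketch of the forwarding scheme used below: after the first pass, reading
// the first word of a survivor's old location (Memory::Address_at(old_addr))
// yields its new address, while a dead object's first word holds NULL. The
// static UpdatePointer helper further down relies on exactly this encoding
// when it rebuilds store buffer entries.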
2641 void MarkCompactCollector::MigrateObject(Address dst,
2644 AllocationSpace dest) {
2645 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
2646 if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
2647 Address src_slot = src;
2648 Address dst_slot = dst;
2649 ASSERT(IsAligned(size, kPointerSize));
2651 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2652 Object* value = Memory::Object_at(src_slot);
2654 Memory::Object_at(dst_slot) = value;
2656 if (heap_->InNewSpace(value)) {
2657 heap_->store_buffer()->Mark(dst_slot);
2658 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2659 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2660 &migration_slots_buffer_,
2661 reinterpret_cast<Object**>(dst_slot),
2662 SlotsBuffer::IGNORE_OVERFLOW);
2665 src_slot += kPointerSize;
2666 dst_slot += kPointerSize;
2669 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2670 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2671 Address code_entry = Memory::Address_at(code_entry_slot);
2673 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2674 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2675 &migration_slots_buffer_,
2676 SlotsBuffer::CODE_ENTRY_SLOT,
2678 SlotsBuffer::IGNORE_OVERFLOW);
2681 } else if (dest == CODE_SPACE) {
2682 PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
2683 heap()->MoveBlock(dst, src, size);
2684 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2685 &migration_slots_buffer_,
2686 SlotsBuffer::RELOCATED_CODE_OBJECT,
2688 SlotsBuffer::IGNORE_OVERFLOW);
2689 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2691 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2692 heap()->MoveBlock(dst, src, size);
2694 Memory::Address_at(src) = dst;
2698 // Visitor for updating pointers from live objects in old spaces to new space.
2699 // It does not expect to encounter pointers to dead objects.
2700 class PointersUpdatingVisitor: public ObjectVisitor {
2702 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
2704 void VisitPointer(Object** p) {
2708 void VisitPointers(Object** start, Object** end) {
2709 for (Object** p = start; p < end; p++) UpdatePointer(p);
2712 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2713 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2714 Object* target = rinfo->target_object();
2715 VisitPointer(&target);
2716 rinfo->set_target_object(target);
2719 void VisitCodeTarget(RelocInfo* rinfo) {
2720 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2721 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2722 VisitPointer(&target);
2723 rinfo->set_target_address(Code::cast(target)->instruction_start());
2726 void VisitDebugTarget(RelocInfo* rinfo) {
2727 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2728 rinfo->IsPatchedReturnSequence()) ||
2729 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2730 rinfo->IsPatchedDebugBreakSlotSequence()));
2731 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2732 VisitPointer(&target);
2733 rinfo->set_call_address(Code::cast(target)->instruction_start());
2736 static inline void UpdateSlot(Heap* heap, Object** slot) {
2737 Object* obj = *slot;
2739 if (!obj->IsHeapObject()) return;
2741 HeapObject* heap_obj = HeapObject::cast(obj);
2743 MapWord map_word = heap_obj->map_word();
2744 if (map_word.IsForwardingAddress()) {
2745 ASSERT(heap->InFromSpace(heap_obj) ||
2746 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2747 HeapObject* target = map_word.ToForwardingAddress();
2749 ASSERT(!heap->InFromSpace(target) &&
2750 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2755 inline void UpdatePointer(Object** p) {
2756 UpdateSlot(heap_, p);
2763 static void UpdatePointer(HeapObject** p, HeapObject* object) {
2764 ASSERT(*p == object);
2766 Address old_addr = object->address();
2768 Address new_addr = Memory::Address_at(old_addr);
2770 // The new space sweep will overwrite the map word of dead objects
2771 // with NULL. In this case we do not need to transfer this entry to
2772 // the store buffer which we are rebuilding.
2773 if (new_addr != NULL) {
2774 *p = HeapObject::FromAddress(new_addr);
2776 // We have to zap this pointer, because the store buffer may overflow later,
2777 // and then we have to scan the entire heap and we don't want to find
2778 // spurious newspace pointers in the old space.
2779 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0));
2784 static HeapObject* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2786 MapWord map_word = HeapObject::cast(*p)->map_word();
2788 if (map_word.IsForwardingAddress()) {
2789 return HeapObject::cast(map_word.ToForwardingAddress());
2792 return HeapObject::cast(*p);
2796 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2800 if (object_size > Page::kMaxNonCodeHeapObjectSize) {
2801 MaybeObject* maybe_result =
2802 heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
2803 if (maybe_result->ToObject(&result)) {
2804 HeapObject* target = HeapObject::cast(result);
2805 MigrateObject(target->address(),
2809 heap()->mark_compact_collector()->tracer()->
2810 increment_promoted_objects_size(object_size);
2814 OldSpace* target_space = heap()->TargetSpace(object);
2816 ASSERT(target_space == heap()->old_pointer_space() ||
2817 target_space == heap()->old_data_space());
2818 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2819 if (maybe_result->ToObject(&result)) {
2820 HeapObject* target = HeapObject::cast(result);
2821 MigrateObject(target->address(),
2824 target_space->identity());
2825 heap()->mark_compact_collector()->tracer()->
2826 increment_promoted_objects_size(object_size);
2835 void MarkCompactCollector::EvacuateNewSpace() {
2836 // There are soft limits in the allocation code, designed to trigger a mark
2837 // sweep collection by failing allocations. But since we are already in
2838 // a mark-sweep allocation, there is no sense in trying to trigger one.
2839 AlwaysAllocateScope scope;
2840 heap()->CheckNewSpaceExpansionCriteria();
2842 NewSpace* new_space = heap()->new_space();
2844 // Store allocation range before flipping semispaces.
2845 Address from_bottom = new_space->bottom();
2846 Address from_top = new_space->top();
2848 // Flip the semispaces. After flipping, to space is empty, from space has live objects.
2851 new_space->ResetAllocationInfo();
2853 int survivors_size = 0;
2855 // First pass: traverse all objects in inactive semispace, remove marks,
2856 // migrate live objects and write forwarding addresses. This stage puts
2857 // new entries in the store buffer and may cause some pages to be marked
2858 // scan-on-scavenge.
2859 SemiSpaceIterator from_it(from_bottom, from_top);
2860 for (HeapObject* object = from_it.Next();
2862 object = from_it.Next()) {
2863 MarkBit mark_bit = Marking::MarkBitFrom(object);
2864 if (mark_bit.Get()) {
2866 // Don't bother decrementing live bytes count. We'll discard the
2867 // entire page at the end.
2868 int size = object->Size();
2869 survivors_size += size;
2871 // Aggressively promote young survivors to the old space.
2872 if (TryPromoteObject(object, size)) {
2876 // Promotion failed. Just migrate object to another semispace.
2877 MaybeObject* allocation = new_space->AllocateRaw(size);
2878 if (allocation->IsFailure()) {
2879 if (!new_space->AddFreshPage()) {
2880 // Shouldn't happen. We are sweeping linearly, and to-space
2881 // has the same number of pages as from-space, so there is always room.
2885 allocation = new_space->AllocateRaw(size);
2886 ASSERT(!allocation->IsFailure());
2888 Object* target = allocation->ToObjectUnchecked();
2890 MigrateObject(HeapObject::cast(target)->address(),
2895 // Process the dead object before we write a NULL into its header.
2896 LiveObjectList::ProcessNonLive(object);
2898 // Mark dead objects in the new space with null in their map field.
2899 Memory::Address_at(object->address()) = NULL;
2903 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2904 new_space->set_age_mark(new_space->top());
2908 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2909 AlwaysAllocateScope always_allocate;
2910 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
2911 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
2912 MarkBit::CellType* cells = p->markbits()->cells();
2913 p->MarkSweptPrecisely();
2915 int last_cell_index =
2916 Bitmap::IndexToCell(
2917 Bitmap::CellAlignIndex(
2918 p->AddressToMarkbitIndex(p->area_end())));
2920 Address cell_base = p->area_start();
2921 int cell_index = Bitmap::IndexToCell(
2922 Bitmap::CellAlignIndex(
2923 p->AddressToMarkbitIndex(cell_base)));
2928 cell_index < last_cell_index;
2929 cell_index++, cell_base += 32 * kPointerSize) {
2930 ASSERT((unsigned)cell_index ==
2931 Bitmap::IndexToCell(
2932 Bitmap::CellAlignIndex(
2933 p->AddressToMarkbitIndex(cell_base))));
2934 if (cells[cell_index] == 0) continue;
2936 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
2937 for (int i = 0; i < live_objects; i++) {
2938 Address object_addr = cell_base + offsets[i] * kPointerSize;
2939 HeapObject* object = HeapObject::FromAddress(object_addr);
2940 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
2942 int size = object->Size();
2944 MaybeObject* target = space->AllocateRaw(size);
2945 if (target->IsFailure()) {
2946 // OS refused to give us memory.
2947 V8::FatalProcessOutOfMemory("Evacuation");
2951 Object* target_object = target->ToObjectUnchecked();
2953 MigrateObject(HeapObject::cast(target_object)->address(),
2957 ASSERT(object->map_word().IsForwardingAddress());
2960 // Clear marking bits for current cell.
2961 cells[cell_index] = 0;
2963 p->ResetLiveBytes();
2967 void MarkCompactCollector::EvacuatePages() {
2968 int npages = evacuation_candidates_.length();
2969 for (int i = 0; i < npages; i++) {
2970 Page* p = evacuation_candidates_[i];
2971 ASSERT(p->IsEvacuationCandidate() ||
2972 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
2973 if (p->IsEvacuationCandidate()) {
2974 // During compaction we might have to request a new page.
2975 // Check that the space still has room for that.
2976 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
2977 EvacuateLiveObjectsFromPage(p);
2979 // Without room for expansion evacuation is not guaranteed to succeed.
2980 // Pessimistically abandon unevacuated pages.
2981 for (int j = i; j < npages; j++) {
2982 Page* page = evacuation_candidates_[j];
2983 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
2984 page->ClearEvacuationCandidate();
2985 page->SetFlag(Page::RESCAN_ON_EVACUATION);
2994 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2996 virtual Object* RetainAs(Object* object) {
2997 if (object->IsHeapObject()) {
2998 HeapObject* heap_object = HeapObject::cast(object);
2999 MapWord map_word = heap_object->map_word();
3000 if (map_word.IsForwardingAddress()) {
3001 return map_word.ToForwardingAddress();
3009 static inline void UpdateSlot(ObjectVisitor* v,
3010 SlotsBuffer::SlotType slot_type,
3012 switch (slot_type) {
3013 case SlotsBuffer::CODE_TARGET_SLOT: {
3014 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
3018 case SlotsBuffer::CODE_ENTRY_SLOT: {
3019 v->VisitCodeEntry(addr);
3022 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3023 HeapObject* obj = HeapObject::FromAddress(addr);
3024 Code::cast(obj)->CodeIterateBody(v);
3027 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3028 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
3029 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
3032 case SlotsBuffer::JS_RETURN_SLOT: {
3033 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
3034 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
3037 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3038 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
3051 SWEEP_AND_VISIT_LIVE_OBJECTS
3055 enum SkipListRebuildingMode {
3061 // Sweep a space precisely. After this has been done the space can
3062 // be iterated precisely, hitting only the live objects. Code space
3063 // is always swept precisely because we want to be able to iterate
3064 // over it. Map space is swept precisely, because it is not compacted.
3065 // Slots in live objects pointing into evacuation candidates are updated if requested.
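// As used later in EvacuateNewSpaceAndCandidates: rescanned old pointer pages
// are swept with SWEEP_AND_VISIT_LIVE_OBJECTS and IGNORE_SKIP_LIST, rescanned
// code pages with SWEEP_AND_VISIT_LIVE_OBJECTS and REBUILD_SKIP_LIST, and old
// data pages fall back to SweepConservatively.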
3067 template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3068 static void SweepPrecisely(PagedSpace* space,
3071 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3072 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3073 space->identity() == CODE_SPACE);
3074 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3076 MarkBit::CellType* cells = p->markbits()->cells();
3077 p->MarkSweptPrecisely();
3079 int last_cell_index =
3080 Bitmap::IndexToCell(
3081 Bitmap::CellAlignIndex(
3082 p->AddressToMarkbitIndex(p->area_end())));
3084 Address free_start = p->area_start();
3086 Bitmap::IndexToCell(
3087 Bitmap::CellAlignIndex(
3088 p->AddressToMarkbitIndex(free_start)));
3090 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3091 Address object_address = free_start;
3094 SkipList* skip_list = p->skip_list();
3095 int curr_region = -1;
3096 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3101 cell_index < last_cell_index;
3102 cell_index++, object_address += 32 * kPointerSize) {
3103 ASSERT((unsigned)cell_index ==
3104 Bitmap::IndexToCell(
3105 Bitmap::CellAlignIndex(
3106 p->AddressToMarkbitIndex(object_address))));
3107 int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
3109 for ( ; live_objects != 0; live_objects--) {
3110 Address free_end = object_address + offsets[live_index++] * kPointerSize;
3111 if (free_end != free_start) {
3112 space->Free(free_start, static_cast<int>(free_end - free_start));
3114 HeapObject* live_object = HeapObject::FromAddress(free_end);
3115 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3116 Map* map = live_object->map();
3117 int size = live_object->SizeFromMap(map);
3118 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3119 live_object->IterateBody(map->instance_type(), size, v);
3121 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3122 int new_region_start =
3123 SkipList::RegionNumber(free_end);
3124 int new_region_end =
3125 SkipList::RegionNumber(free_end + size - kPointerSize);
3126 if (new_region_start != curr_region ||
3127 new_region_end != curr_region) {
3128 skip_list->AddObject(free_end, size);
3129 curr_region = new_region_end;
3132 free_start = free_end + size;
3134 // Clear marking bits for current cell.
3135 cells[cell_index] = 0;
3137 if (free_start != p->area_end()) {
3138 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
3140 p->ResetLiveBytes();
3144 static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3145 Page* p = Page::FromAddress(code->address());
3147 if (p->IsEvacuationCandidate() ||
3148 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3152 Address code_start = code->address();
3153 Address code_end = code_start + code->Size();
3155 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3156 uint32_t end_index =
3157 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3159 Bitmap* b = p->markbits();
3161 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3162 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3164 MarkBit::CellType* start_cell = start_mark_bit.cell();
3165 MarkBit::CellType* end_cell = end_mark_bit.cell();
3168 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3169 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
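// Worked example (illustrative): if start_mark_bit.mask() is 1 << 3, then
// start_mask is ~0x7, i.e. bits 3 and above; if end_mark_bit.mask() is
// 1 << 7, then end_mask is 0xFF, i.e. bits 0 through 7. Their intersection
// selects exactly bits 3..7, the mark bits covering the code object within a
// single bitmap cell; when the range spans several cells, start_mask applies
// to the first cell, end_mask to the last, and the loop below covers the
// cells in between.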
3171 if (start_cell == end_cell) {
3172 *start_cell |= start_mask & end_mask;
3174 *start_cell |= start_mask;
3175 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3178 *end_cell |= end_mask;
3181 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3190 static bool IsOnInvalidatedCodeObject(Address addr) {
3191 // We did not record any slots in large objects thus
3192 // we can safely go to the page from the slot address.
3193 Page* p = Page::FromAddress(addr);
3195 // First check owner's identity because old pointer and old data spaces
3196 // are swept lazily and might still have non-zero mark-bits on some pages.
3198 if (p->owner()->identity() != CODE_SPACE) return false;
3200 // In code space only bits on evacuation candidates (but we don't record
3201 // any slots on them) and under invalidated code objects are non-zero.
3203 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3205 return mark_bit.Get();
3209 void MarkCompactCollector::InvalidateCode(Code* code) {
3210 if (heap_->incremental_marking()->IsCompacting() &&
3211 !ShouldSkipEvacuationSlotRecording(code)) {
3212 ASSERT(compacting_);
3214 // If the object is white then no slots were recorded on it yet.
3215 MarkBit mark_bit = Marking::MarkBitFrom(code);
3216 if (Marking::IsWhite(mark_bit)) return;
3218 invalidated_code_.Add(code);
3223 bool MarkCompactCollector::MarkInvalidatedCode() {
3224 bool code_marked = false;
3226 int length = invalidated_code_.length();
3227 for (int i = 0; i < length; i++) {
3228 Code* code = invalidated_code_[i];
3230 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3239 void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3240 int length = invalidated_code_.length();
3241 for (int i = 0; i < length; i++) {
3242 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3247 void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3248 int length = invalidated_code_.length();
3249 for (int i = 0; i < length; i++) {
3250 Code* code = invalidated_code_[i];
3252 code->Iterate(visitor);
3253 SetMarkBitsUnderInvalidatedCode(code, false);
3256 invalidated_code_.Rewind(0);
3260 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3261 bool code_slots_filtering_required;
3262 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
3263 code_slots_filtering_required = MarkInvalidatedCode();
3269 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
3273 // Second pass: find pointers to new space and update them.
3274 PointersUpdatingVisitor updating_visitor(heap());
3276 { GCTracer::Scope gc_scope(tracer_,
3277 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3278 // Update pointers in to space.
3279 SemiSpaceIterator to_it(heap()->new_space()->bottom(),
3280 heap()->new_space()->top());
3281 for (HeapObject* object = to_it.Next();
3283 object = to_it.Next()) {
3284 Map* map = object->map();
3285 object->IterateBody(map->instance_type(),
3286 object->SizeFromMap(map),
3291 { GCTracer::Scope gc_scope(tracer_,
3292 GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
3294 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3295 LiveObjectList::IterateElements(&updating_visitor);
3298 { GCTracer::Scope gc_scope(tracer_,
3299 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3300 StoreBufferRebuildScope scope(heap_,
3301 heap_->store_buffer(),
3302 &Heap::ScavengeStoreBufferCallback);
3303 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3306 { GCTracer::Scope gc_scope(tracer_,
3307 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3308 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3309 migration_slots_buffer_,
3310 code_slots_filtering_required);
3311 if (FLAG_trace_fragmentation) {
3312 PrintF(" migration slots buffer: %d\n",
3313 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3316 if (compacting_ && was_marked_incrementally_) {
3317 // It's difficult to filter out slots recorded for large objects.
3318 LargeObjectIterator it(heap_->lo_space());
3319 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3320 // LargeObjectSpace is not swept yet thus we have to skip
3321 // dead objects explicitly.
3322 if (!IsMarked(obj)) continue;
3324 Page* p = Page::FromAddress(obj->address());
3325 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3326 obj->Iterate(&updating_visitor);
3327 p->ClearFlag(Page::RESCAN_ON_EVACUATION);
3333 int npages = evacuation_candidates_.length();
3334 { GCTracer::Scope gc_scope(
3335 tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3336 for (int i = 0; i < npages; i++) {
3337 Page* p = evacuation_candidates_[i];
3338 ASSERT(p->IsEvacuationCandidate() ||
3339 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3341 if (p->IsEvacuationCandidate()) {
3342 SlotsBuffer::UpdateSlotsRecordedIn(heap_,
3344 code_slots_filtering_required);
3345 if (FLAG_trace_fragmentation) {
3346 PrintF(" page %p slots buffer: %d\n",
3347 reinterpret_cast<void*>(p),
3348 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3351 // Important: the skip list should be cleared only after the roots have been
3352 // updated, because root iteration traverses the stack and might have to find
3353 // code objects from a non-updated pc pointing into an evacuation candidate.
3354 SkipList* list = p->skip_list();
3355 if (list != NULL) list->Clear();
3357 if (FLAG_gc_verbose) {
3358 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3359 reinterpret_cast<intptr_t>(p));
3361 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3362 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3364 switch (space->identity()) {
3365 case OLD_DATA_SPACE:
3366 SweepConservatively(space, p);
3368 case OLD_POINTER_SPACE:
3369 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
3370 space, p, &updating_visitor);
3373 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
3374 space, p, &updating_visitor);
3384 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
3386 // Update pointers from cells.
3387 HeapObjectIterator cell_iterator(heap_->cell_space());
3388 for (HeapObject* cell = cell_iterator.Next();
3390 cell = cell_iterator.Next()) {
3391 if (cell->IsJSGlobalPropertyCell()) {
3392 Address value_address =
3393 reinterpret_cast<Address>(cell) +
3394 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
3395 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
3399 // Update pointer from the global contexts list.
3400 updating_visitor.VisitPointer(heap_->global_contexts_list_address());
3402 heap_->symbol_table()->Iterate(&updating_visitor);
3404 // Update pointers from external string table.
3405 heap_->UpdateReferencesInExternalStringTable(
3406 &UpdateReferenceInExternalStringTableEntry);
3408 if (!FLAG_watch_ic_patching) {
3409 // Update JSFunction pointers from the runtime profiler.
3410 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
3414 EvacuationWeakObjectRetainer evacuation_object_retainer;
3415 heap()->ProcessWeakReferences(&evacuation_object_retainer);
3417 // Visit invalidated code (we ignored all slots on it) and clear mark-bits under it.
3419 ProcessInvalidatedCode(&updating_visitor);
3422 if (FLAG_verify_heap) {
3423 VerifyEvacuation(heap_);
3427 slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
3428 ASSERT(migration_slots_buffer_ == NULL);
3429 for (int i = 0; i < npages; i++) {
3430 Page* p = evacuation_candidates_[i];
3431 if (!p->IsEvacuationCandidate()) continue;
3432 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3433 space->Free(p->area_start(), p->area_size());
3434 p->set_scan_on_scavenge(false);
3435 slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
3436 p->ResetLiveBytes();
3437 space->ReleasePage(p);
3439 evacuation_candidates_.Rewind(0);
3440 compacting_ = false;
3444 static const int kStartTableEntriesPerLine = 5;
3445 static const int kStartTableLines = 171;
3446 static const int kStartTableInvalidLine = 127;
3447 static const int kStartTableUnusedEntry = 126;
3449 #define _ kStartTableUnusedEntry
3450 #define X kStartTableInvalidLine
3451 // Mark-bit to object start offset table.
3453 // The line is indexed by the mark bits in a byte. The first number on
3454 // the line describes the number of live object starts for the line and the
3455 // other numbers on the line describe the offsets (in words) of the object starts.
3458 // Since objects are at least 2 words large we don't have entries for two
3459 // consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
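// Worked example (illustrative): the byte 21 (binary 10101) has mark bits at
// positions 0, 2 and 4, so its line below reads "3, 0, 2, 4, _": three object
// starts, at word offsets 0, 2 and 4. Lines for bytes that would require
// consecutive 1 bits are marked X (kStartTableInvalidLine) and unused
// trailing entries are filled with _ (kStartTableUnusedEntry).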
3460 char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3471 2, 1, 3, _, _, // 10
3472 X, _, _, _, _, // 11
3473 X, _, _, _, _, // 12
3474 X, _, _, _, _, // 13
3475 X, _, _, _, _, // 14
3476 X, _, _, _, _, // 15
3477 1, 4, _, _, _, // 16
3478 2, 0, 4, _, _, // 17
3479 2, 1, 4, _, _, // 18
3480 X, _, _, _, _, // 19
3481 2, 2, 4, _, _, // 20
3482 3, 0, 2, 4, _, // 21
3483 X, _, _, _, _, // 22
3484 X, _, _, _, _, // 23
3485 X, _, _, _, _, // 24
3486 X, _, _, _, _, // 25
3487 X, _, _, _, _, // 26
3488 X, _, _, _, _, // 27
3489 X, _, _, _, _, // 28
3490 X, _, _, _, _, // 29
3491 X, _, _, _, _, // 30
3492 X, _, _, _, _, // 31
3493 1, 5, _, _, _, // 32
3494 2, 0, 5, _, _, // 33
3495 2, 1, 5, _, _, // 34
3496 X, _, _, _, _, // 35
3497 2, 2, 5, _, _, // 36
3498 3, 0, 2, 5, _, // 37
3499 X, _, _, _, _, // 38
3500 X, _, _, _, _, // 39
3501 2, 3, 5, _, _, // 40
3502 3, 0, 3, 5, _, // 41
3503 3, 1, 3, 5, _, // 42
3504 X, _, _, _, _, // 43
3505 X, _, _, _, _, // 44
3506 X, _, _, _, _, // 45
3507 X, _, _, _, _, // 46
3508 X, _, _, _, _, // 47
3509 X, _, _, _, _, // 48
3510 X, _, _, _, _, // 49
3511 X, _, _, _, _, // 50
3512 X, _, _, _, _, // 51
3513 X, _, _, _, _, // 52
3514 X, _, _, _, _, // 53
3515 X, _, _, _, _, // 54
3516 X, _, _, _, _, // 55
3517 X, _, _, _, _, // 56
3518 X, _, _, _, _, // 57
3519 X, _, _, _, _, // 58
3520 X, _, _, _, _, // 59
3521 X, _, _, _, _, // 60
3522 X, _, _, _, _, // 61
3523 X, _, _, _, _, // 62
3524 X, _, _, _, _, // 63
3525 1, 6, _, _, _, // 64
3526 2, 0, 6, _, _, // 65
3527 2, 1, 6, _, _, // 66
3528 X, _, _, _, _, // 67
3529 2, 2, 6, _, _, // 68
3530 3, 0, 2, 6, _, // 69
3531 X, _, _, _, _, // 70
3532 X, _, _, _, _, // 71
3533 2, 3, 6, _, _, // 72
3534 3, 0, 3, 6, _, // 73
3535 3, 1, 3, 6, _, // 74
3536 X, _, _, _, _, // 75
3537 X, _, _, _, _, // 76
3538 X, _, _, _, _, // 77
3539 X, _, _, _, _, // 78
3540 X, _, _, _, _, // 79
3541 2, 4, 6, _, _, // 80
3542 3, 0, 4, 6, _, // 81
3543 3, 1, 4, 6, _, // 82
3544 X, _, _, _, _, // 83
3545 3, 2, 4, 6, _, // 84
3546 4, 0, 2, 4, 6, // 85
3547 X, _, _, _, _, // 86
3548 X, _, _, _, _, // 87
3549 X, _, _, _, _, // 88
3550 X, _, _, _, _, // 89
3551 X, _, _, _, _, // 90
3552 X, _, _, _, _, // 91
3553 X, _, _, _, _, // 92
3554 X, _, _, _, _, // 93
3555 X, _, _, _, _, // 94
3556 X, _, _, _, _, // 95
3557 X, _, _, _, _, // 96
3558 X, _, _, _, _, // 97
3559 X, _, _, _, _, // 98
3560 X, _, _, _, _, // 99
3561 X, _, _, _, _, // 100
3562 X, _, _, _, _, // 101
3563 X, _, _, _, _, // 102
3564 X, _, _, _, _, // 103
3565 X, _, _, _, _, // 104
3566 X, _, _, _, _, // 105
3567 X, _, _, _, _, // 106
3568 X, _, _, _, _, // 107
3569 X, _, _, _, _, // 108
3570 X, _, _, _, _, // 109
3571 X, _, _, _, _, // 110
3572 X, _, _, _, _, // 111
3573 X, _, _, _, _, // 112
3574 X, _, _, _, _, // 113
3575 X, _, _, _, _, // 114
3576 X, _, _, _, _, // 115
3577 X, _, _, _, _, // 116
3578 X, _, _, _, _, // 117
3579 X, _, _, _, _, // 118
3580 X, _, _, _, _, // 119
3581 X, _, _, _, _, // 120
3582 X, _, _, _, _, // 121
3583 X, _, _, _, _, // 122
3584 X, _, _, _, _, // 123
3585 X, _, _, _, _, // 124
3586 X, _, _, _, _, // 125
3587 X, _, _, _, _, // 126
3588 X, _, _, _, _, // 127
3589 1, 7, _, _, _, // 128
3590 2, 0, 7, _, _, // 129
3591 2, 1, 7, _, _, // 130
3592 X, _, _, _, _, // 131
3593 2, 2, 7, _, _, // 132
3594 3, 0, 2, 7, _, // 133
3595 X, _, _, _, _, // 134
3596 X, _, _, _, _, // 135
3597 2, 3, 7, _, _, // 136
3598 3, 0, 3, 7, _, // 137
3599 3, 1, 3, 7, _, // 138
3600 X, _, _, _, _, // 139
3601 X, _, _, _, _, // 140
3602 X, _, _, _, _, // 141
3603 X, _, _, _, _, // 142
3604 X, _, _, _, _, // 143
3605 2, 4, 7, _, _, // 144
3606 3, 0, 4, 7, _, // 145
3607 3, 1, 4, 7, _, // 146
3608 X, _, _, _, _, // 147
3609 3, 2, 4, 7, _, // 148
3610 4, 0, 2, 4, 7, // 149
3611 X, _, _, _, _, // 150
3612 X, _, _, _, _, // 151
3613 X, _, _, _, _, // 152
3614 X, _, _, _, _, // 153
3615 X, _, _, _, _, // 154
3616 X, _, _, _, _, // 155
3617 X, _, _, _, _, // 156
3618 X, _, _, _, _, // 157
3619 X, _, _, _, _, // 158
3620 X, _, _, _, _, // 159
3621 2, 5, 7, _, _, // 160
3622 3, 0, 5, 7, _, // 161
3623 3, 1, 5, 7, _, // 162
3624 X, _, _, _, _, // 163
3625 3, 2, 5, 7, _, // 164
3626 4, 0, 2, 5, 7, // 165
3627 X, _, _, _, _, // 166
3628 X, _, _, _, _, // 167
3629 3, 3, 5, 7, _, // 168
3630 4, 0, 3, 5, 7, // 169
3631 4, 1, 3, 5, 7 // 170
3637 // Takes a word of mark bits. Returns the number of objects that start in the
3638 // range. Puts the word offsets of the object starts in the supplied array.
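// Worked example (illustrative, assuming the per-byte bookkeeping adds 8 to
// the offsets for each byte already consumed): for mark_bits == 0x105 the low
// byte 0x05 contributes starts at offsets 0 and 2, the next byte 0x01
// contributes a start at offset 8, so starts[] becomes {0, 2, 8} and the
// function returns 3.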
3639 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
3643 // No consecutive 1 bits.
3644 ASSERT((mark_bits & 0x180) != 0x180);
3645 ASSERT((mark_bits & 0x18000) != 0x18000);
3646 ASSERT((mark_bits & 0x1800000) != 0x1800000);
3648 while (mark_bits != 0) {
3649 int byte = (mark_bits & 0xff);
3652 ASSERT(byte < kStartTableLines); // No consecutive 1 bits.
3653 char* table = kStartTable + byte * kStartTableEntriesPerLine;
3654 int objects_in_these_8_words = table[0];
3655 ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
3656 ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
3657 for (int i = 0; i < objects_in_these_8_words; i++) {
3658 starts[objects++] = offset + table[1 + i];
3667 static inline Address DigestFreeStart(Address approximate_free_start,
3668 uint32_t free_start_cell) {
3669 ASSERT(free_start_cell != 0);
3671 // No consecutive 1 bits.
3672 ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
3675 uint32_t cell = free_start_cell;
3676 int offset_of_last_live;
3677 if ((cell & 0x80000000u) != 0) {
3678 // This case would overflow below.
3679 offset_of_last_live = 31;
3681 // Remove all but one bit, the most significant. This is an optimization
3682 // that may or may not be worthwhile.
3688 cell = (cell + 1) >> 1;
3689 int live_objects = MarkWordToObjectStarts(cell, offsets);
3690 ASSERT(live_objects == 1);
3691 offset_of_last_live = offsets[live_objects - 1];
3693 Address last_live_start =
3694 approximate_free_start + offset_of_last_live * kPointerSize;
3695 HeapObject* last_live = HeapObject::FromAddress(last_live_start);
3696 Address free_start = last_live_start + last_live->Size();
static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}

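// (cell ^ (cell - 1)) sets every bit up to and including the lowest set bit
// of cell, so adding one and shifting right isolates that bit.  For
// cell == 0x48 this gives first_set_bit == 0x08, i.e. the first live object
// in the block starts at word offset 3.
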
// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptConservatively();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  int cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_start())));

  intptr_t freed_bytes = 0;

  // This is the start of the 32 word block that we are currently looking at.
  Address block_address = p->area_start();

  // Skip over all the dead objects at the start of the page and mark them free.
  for ( ;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    if (cells[cell_index] != 0) break;
  }
  size_t size = block_address - p->area_start();
  if (cell_index == last_cell_index) {
    freed_bytes += static_cast<int>(space->Free(p->area_start(),
                                                static_cast<int>(size)));
    ASSERT_EQ(0, p->LiveBytes());
    return freed_bytes;
  }
  // Grow the size of the start-of-page free space a little to get up to the
  // first live object.
  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
  // Free the first free space.
  size = free_end - p->area_start();
  freed_bytes += space->Free(p->area_start(),
                             static_cast<int>(size));
  // The start of the current free area is represented in undigested form by
  // the address of the last 32-word section that contained a live object and
  // the marking bitmap for that cell, which describes where the live object
  // started.  Unless we find a large free space in the bitmap we will not
  // digest this pair into a real address.  We start the iteration here at the
  // first word in the marking bit map that indicates a live object.
  Address free_start = block_address;
  uint32_t free_start_cell = cells[cell_index];

  for ( ;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    ASSERT((unsigned)cell_index ==
        Bitmap::IndexToCell(
            Bitmap::CellAlignIndex(
                p->AddressToMarkbitIndex(block_address))));
    uint32_t cell = cells[cell_index];
    if (cell != 0) {
      // We have a live object.  Check approximately whether it is more than 32
      // words since the last live object.
      if (block_address - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (block_address - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space it still looks
          // like we have a large enough free space to be worth bothering with.
          // So now we need to find the start of the first live object at the
          // end of the free space.
          free_end = StartOfLiveObject(block_address, cell);
          freed_bytes += space->Free(free_start,
                                     static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = block_address;
      free_start_cell = cell;
      // Clear marking bits for current cell.
      cells[cell_index] = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (block_address - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += space->Free(free_start,
                               static_cast<int>(block_address - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}

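// SweepConservatively's return value (the number of bytes put on the free
// list) is what the LAZY_CONSERVATIVE path in SweepSpace below uses to decide
// whether enough memory has already been recovered to stop sweeping eagerly.
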
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE);

  space->ClearStats();

  PageIterator it(space);

  intptr_t freed_bytes = 0;
  int pages_swept = 0;
  intptr_t newspace_size = space->heap()->new_space()->Size();
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;

  intptr_t old_space_size = heap()->PromotedSpaceSize();
  intptr_t space_left =
      Min(heap()->OldGenPromotionLimit(old_space_size),
          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;

  while (it.has_next()) {
    Page* p = it.next();

    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsEvacuationCandidate()) {
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    if (lazy_sweeping_active) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
               reinterpret_cast<intptr_t>(p));
      }
      space->IncreaseUnsweptFreeBytes(p);
      continue;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively(space, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        freed_bytes += SweepConservatively(space, p);
        pages_swept++;
        if (space_left + freed_bytes > newspace_size) {
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Only %" V8PRIdPTR " bytes freed.  Still sweeping.\n",
                   freed_bytes);
          }
        }
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}

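// Lazy sweeping budget: SweepSpace stops eagerly sweeping pages once the
// headroom left below the old-generation limits plus the bytes just freed
// exceed the size of the new space -- roughly enough to absorb what the next
// scavenges may promote.  The remaining pages are left for the lazy sweeper.
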
void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif
  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
  if (sweep_precisely_) how_to_sweep = PRECISE;
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether an unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();
}

void MarkCompactCollector::EnableCodeFlushing(bool enable) {
  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(heap()->isolate());
  } else {
    if (code_flusher_ == NULL) return;
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}

// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects.  We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}

void MarkCompactCollector::Initialize() {
  StaticMarkingVisitor::Initialize();
}

bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}


bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address,
                        SlotType type,
                        Address addr,
                        AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  ASSERT(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}

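// A typed slot therefore occupies two consecutive buffer entries: the slot
// type (a small integer that IsTypedSlot can tell apart from a real pointer)
// followed by the address.  UpdateSlots below consumes the pair in the same
// order.
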
static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}

void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotTypeForRMode(rinfo->rmode()),
                            rinfo->pc(),
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}

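// If recording the slot fails because the buffer chain has grown too long,
// the target page is dropped from the set of evacuation candidates; objects
// on it then stay where they are, so the unrecorded slots never need to be
// updated.
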
void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}

static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      UpdateSlot(&v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}

void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(&v,
                   DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}

SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}

} }  // namespace v8::internal