// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      weak_closure_was_overapproximated_(false),
      weak_closure_approximation_rounds_(0),
      request_type_(COMPLETE_MARKING) {}


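// The Record* functions below are the out-of-line slow paths of the
// incremental write barrier; the generated fast path calls them once it
// has determined that incremental marking is active and the write may
// require marking the value or recording the slot for the compactor.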
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


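// Note on the counter logic above: each chunk counts write-barrier
// invocations down from kWriteBarrierCounterGranularity. Once the counter
// drops below half the granularity, the consumed portion is credited to
// write_barriers_invoked_since_last_step_ and the chunk counter is reset,
// so the marker observes barrier activity in coarse batches rather than
// one update per write.

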
void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


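// The helpers below manipulate the tri-color mark bits directly: white
// objects are unvisited, grey objects are queued for scanning, and black
// objects are fully scanned. The barriers above exist to keep a black
// object from ever pointing at an unrecorded white one.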
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;
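
  // How the progress bar works: a large FixedArray on a LO_SPACE page is
  // scanned in kProgressBarScanningChunk (32 KB) slices. The chunk's
  // progress_bar() records the offset reached so far and the partially
  // scanned array is pushed back onto the marking deque, so a 4 MB array
  // is visited in roughly 128 bounded steps instead of one long pause.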

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


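// Visitor used for the strong-root iteration below: every root slot that
// holds a heap object is fed through IncrementalMarking::MarkObject,
// seeding the marking deque with the initial set of grey objects.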
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


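// The POINTERS_TO_HERE_ARE_INTERESTING / POINTERS_FROM_HERE_ARE_INTERESTING
// flags are what the write-barrier fast path consults: a write is only
// funneled to the slow path when the target's page is "to-interesting" and
// the source's page is "from-interesting". Flipping them per space is how
// the incremental barrier is switched on and off cheaply.

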
void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() &&
         heap_->NextGCIsLikelyToBeFull(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


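// Patching walks the code-stub dictionary and rewrites every RecordWrite
// stub in place, so all compiled code switches between the
// STORE_BUFFER_ONLY, INCREMENTAL and INCREMENTAL_COMPACTION barrier modes
// at once, without recompiling callers.

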
void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


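// Over-approximating the weak closure: object groups and implicit
// reference groups supplied by the embedder can keep objects alive, so
// they are marked in bounded rounds before marking is allowed to finish.
// Once a round makes less progress than
// FLAG_min_progress_during_object_groups_marking, or
// FLAG_max_object_groups_marking_rounds is reached, the closure is
// declared over-approximated and the groups are removed.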
void IncrementalMarking::MarkObjectGroups() {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  ++weak_closure_approximation_rounds_;
  if ((weak_closure_approximation_rounds_ >=
       FLAG_max_object_groups_marking_rounds) ||
      (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
    weak_closure_was_overapproximated_ = true;
  }

  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


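// A scavenge moves new-space objects while the marking deque may still
// point at them. The fix-up below rewrites each deque entry in place:
// forwarded objects are replaced by their new address, unforwarded
// new-space entries are dropped (the object is dead), and old-space
// entries are kept as they are.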
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (mark_bit.data_only()) {
    MarkBlackOrKeepGrey(obj, mark_bit, obj->Size());
  } else if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


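// Note the byte accounting above: when a progress-bar array is only
// partially scanned, NotifyIncompleteScanOfObject records the unscanned
// remainder in unscanned_bytes_of_large_object_, so only bytes actually
// visited are charged against this step's budget.

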
void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
  }
  request_type_ = OVERAPPROXIMATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  weak_closure_was_overapproximated_ = false;
  weak_closure_approximation_rounds_ = 0;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


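// SpeedUp() tunes marking_speed_, the multiplier that converts allocated
// bytes into a marking budget in Step(). When any of the triggers below
// fires during marking, the speed is bumped by kMarkingSpeedAccelleration
// and then scaled by 1.3x, capped at kMaxMarkingSpeed, so the marker's
// pace compounds whenever it appears to be falling behind the mutator.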
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}


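// Step() is the scheduler's entry point. Its work budget is
// bytes_to_process = marking_speed_ * max(bytes allocated, write-barrier
// invocations since the last step), so a mutator that allocates little
// but mutates the object graph heavily still funds marking work through
// its barrier activity.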
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !heap_->concurrent_sweeping_enabled())) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (FLAG_overapproximate_weak_closure &&
              !weak_closure_was_overapproximated_) {
            OverApproximateWeakClosure(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slow or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}
}  // namespace v8::internal