// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      bytes_rescanned_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      weak_closure_was_overapproximated_(false),
      weak_closure_approximation_rounds_(0),
      request_type_(COMPLETE_MARKING),
      gc_callback_flags_(kNoGCCallbackFlags) {}

void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}

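// Entry point for write barriers invoked from generated code. The chunk's
// write barrier counter is replenished to kWriteBarrierCounterGranularity
// once it drops below half of the granularity, and the consumed budget is
// accumulated in write_barriers_invoked_since_last_step_, which Step() uses
// to size the next marking step.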
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
}

class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

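  // Large fixed arrays in large-object space are scanned incrementally with
  // a per-chunk progress bar: at most kProgressBarScanningChunk bytes are
  // scanned per visit, the position reached is recorded in the chunk's
  // progress bar, and a partially scanned array is re-queued on the marking
  // deque.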
  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until
      // it is fully scanned. Fall back to scanning it through to the end in
      // case this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->marking_deque()->UnshiftBlack(
              object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};

void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

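// The incremental write barrier is switched on and off by toggling page
// flags: while marking, both pointers to and pointers from a page are
// interesting; when marking is off, only pointers from old-space pages
// (needed by the store buffer) remain interesting.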
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}

void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
  return CanBeActivated() &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}

bool IncrementalMarking::WasActivated() { return was_activated_; }

bool IncrementalMarking::CanBeActivated() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  // Don't switch on for very small heaps.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}

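// Record-write stubs are compiled in STORE_BUFFER_ONLY mode and patched to
// INCREMENTAL or INCREMENTAL_COMPACTION while incremental marking runs (see
// PatchIncrementalMarkingRecordWriteStubs below). A stub generated while
// marking is already active must therefore be patched on activation.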
void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}

static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

void IncrementalMarking::Start(int mark_compact_flags,
                               const GCCallbackFlags gc_callback_flags,
                               const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  gc_callback_flags_ = gc_callback_flags;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    heap_->mark_compact_collector()->SetFlags(mark_compact_flags);
    StartMarking();
    heap_->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}

void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

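// One round of marking through object groups and implicit references. The
// weak closure is treated as overapproximated once too many rounds have run
// or a round makes too little progress on the marking deque.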
void IncrementalMarking::MarkObjectGroups() {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  ++weak_closure_approximation_rounds_;
  if ((weak_closure_approximation_rounds_ >=
       FLAG_max_object_groups_marking_rounds) ||
      (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
    weak_closure_was_overapproximated_ = true;
  }

  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}

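// After a scavenge, grey objects in new space may have moved. Compact the
// marking deque in place: follow forwarding pointers for survivors, drop
// entries for dead new-space objects, and skip one-word fillers.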
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}

void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}

void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}

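// Drains the marking deque until roughly bytes_to_process bytes of object
// bodies have been visited; the unscanned remainder of a large object under
// the progress bar does not count against the budget.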
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}

void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}

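// Finishes marking non-incrementally: drains the whole deque, then blackens
// the caches that were deliberately kept grey during incremental marking
// (the polymorphic code cache and the per-context normalized map caches).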
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as the mutator
    // is stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}

void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}

void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
  }
  request_type_ = OVERAPPROXIMATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental;
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  weak_closure_was_overapproximated_ = false;
  weak_closure_approximation_rounds_ = 0;
}

void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    Start(Heap::kNoGCFlags, kNoGCCallbackFlags, "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

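// Heuristics for raising marking_speed_: speed up periodically every
// kMarkingSpeedAccellerationInterval steps, when old-space headroom runs
// low, when the heap has grown substantially since marking started, or when
// the marker is not keeping up with the promotion rate.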
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

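// One incremental marking step. The work budget scales with marking_speed_
// and with whichever is larger: bytes allocated since the last step or the
// number of write barriers invoked since the last step. This keeps the
// marker progressing even for a tight, non-allocating mutator loop.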
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot
    // of write barriers before we get here and check them (marking can only
    // take place on allocation), so to reduce the lumpiness we don't use the
    // write barriers invoked since the last step directly to determine the
    // amount of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !heap_->concurrent_sweeping_enabled())) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (FLAG_overapproximate_weak_closure &&
              !weak_closure_was_overapproximated_) {
            OverApproximateWeakClosure(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }

  return bytes_processed;
}

void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}

int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}

void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}

void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8