// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      weak_closure_was_overapproximated_(false),
      request_type_(COMPLETE_MARKING) {}

void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}

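// Slow path of the write barrier emitted in generated code. When a chunk's
// write barrier counter falls below half of its granularity, the consumed
// portion is folded into write_barriers_invoked_since_last_step_ (which
// Step() uses as a measure of mutator write activity) and the counter is
// reset to its full granularity.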
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

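// Helpers for the tri-color marking invariant: white objects are unmarked,
// grey objects are marked but still need their fields scanned, and black
// objects are fully scanned. Blackening an object also accounts its size in
// the page's live bytes.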
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}

static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}

class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

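  // Large fixed arrays are scanned in chunks of this many bytes per visit so
  // that a single object cannot consume an entire incremental step; the
  // progress bar on the MemoryChunk remembers how far the scan has advanced.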
  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};

void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

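// The POINTERS_TO/FROM_HERE_ARE_INTERESTING flags control which pages the
// write barrier pays attention to while incremental marking is running;
// RESCAN_ON_EVACUATION is set for large objects under compaction because
// individual slots recorded for them are hard to filter out later.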
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}

void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}

void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}

void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() &&
         heap_->NextGCIsLikelyToBeFull(
             heap_->old_generation_allocation_limit());
}

bool IncrementalMarking::WasActivated() { return was_activated_; }

bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}

void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not activated.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

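// Walks the heap's code stub dictionary and switches every RecordWrite stub
// between its STORE_BUFFER_ONLY, INCREMENTAL and INCREMENTAL_COMPACTION
// modes, so compiled write barriers match the collector's current state.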
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}

void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

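// After a scavenge, entries in the marking deque may point to objects that
// were moved or died in new space. Rewrite surviving entries to their
// forwarding addresses and drop the rest, compacting the deque in place.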
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}

void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}

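// Drains up to bytes_to_process worth of objects from the marking deque and
// returns the number of bytes actually visited. Bytes left unscanned in a
// large object (via the progress bar) are subtracted from the budget through
// unscanned_bytes_of_large_object_.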
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}

void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}

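// When marking is in progress, Hurry drains the marking deque synchronously,
// without a byte budget, and then blackens caches that were deliberately kept
// grey during incremental marking (the polymorphic code cache and the
// per-context normalized map caches).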
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}

void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}

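// Rather than finishing marking directly, this requests a GC interrupt via
// the stack guard; the handler servicing the interrupt is then expected to
// perform the weak closure overapproximation (request_type_ records which
// kind of finalization was asked for).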
void IncrementalMarking::OverApproximateWeakClosure() {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
  }
  set_should_hurry(true);
  request_type_ = OVERAPPROXIMATION;
  heap_->isolate()->stack_guard()->RequestGC();
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental; so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    request_type_ = COMPLETE_MARKING;
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  weak_closure_was_overapproximated_ = false;
}

void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

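// Heuristics that grow marking_speed_ (the per-step work multiplier): marking
// is sped up periodically by step count, when old-space headroom is nearly
// gone, when the heap has grown substantially since marking started, or when
// scanning is not keeping up with allocation.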
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}

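// One incremental marking step. The byte budget is marking_speed_ times the
// larger of bytes allocated and write barriers invoked since the last step,
// so the collector does more work when the mutator is more active. During
// the SWEEPING phase a step may only be used to finish sweeping and then
// start marking proper.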
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (FLAG_overapproximate_weak_closure &&
              !weak_closure_was_overapproximated_ &&
              action == GC_VIA_STACK_GUARD) {
            OverApproximateWeakClosure();
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slow or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}

void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}

int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}

void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}

void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}
}  // namespace v8::internal