// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0) {}


void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
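

// Slow path of the incremental write barrier. If the object holding the
// slot is already black it will not be rescanned, so the slot is recorded
// here for the compactor to update after evacuation.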
void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}
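

// Entry point for the write barrier from generated code. The per-chunk
// write barrier counter is consumed in coarse-grained steps; the budget used
// up is accumulated in write_barriers_invoked_since_last_step_, which Step()
// uses as one of the signals for how much marking work to do.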
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}
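

// Write barrier for stores into code objects via relocation info. While the
// target is still white, a black host is reverted to grey so that it gets
// rescanned; otherwise, when compacting, slots in black hosts are recorded.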
void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}
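

// The helpers below operate on the tri-color marking abstraction: white
// objects have not been visited, grey objects have been discovered but not
// yet scanned (they normally sit on the marking deque), and black objects
// are fully scanned. Live byte counts are kept in sync on every transition.
// This first helper greys an object without enqueuing it; it is used for
// caches that are marked black in a separate pass when marking finishes.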
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}
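

// Visitor used to scan grey objects during incremental marking steps. It
// extends the static marking visitor with incremental-specific cases: large
// fixed arrays are scanned piecewise via a progress bar, and native contexts
// keep their normalized map cache grey until the end of marking.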
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if the object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};
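

// Visits the strong roots when incremental marking starts: data-only objects
// are marked black immediately, everything else is greyed and pushed onto
// the marking deque for later scanning.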
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}
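

// The page flags below steer the write barrier: while marking is active,
// pointers to and from ordinary pages are interesting; outside of marking
// only the flags needed by the store buffer remain set.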
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->isolate()->IsInitialized() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}
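

// The marking deque is backed by a 4 MB virtual memory reservation that is
// committed lazily here and uncommitted again once marking stops, so the
// memory is only resident while it is actually needed.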
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());
  DCHECK(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}
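

// Transitions into the MARKING state: decides whether this cycle will also
// compact, patches the RecordWrite stubs accordingly, activates the
// incremental write barrier, and greys the strong roots as the initial
// marking work.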
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}
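

// Mark bits for new space become stale across a scavenge, so the from-space
// bitmaps are cleared up front; deque entries for objects that survived are
// fixed up afterwards in UpdateMarkingDequeAfterScavenge.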
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}
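

// Rebuilds the marking deque after a scavenge: entries that moved are
// replaced by their forwarding addresses, while dead objects and one-word
// fillers are dropped. The deque is a circular buffer, hence the "& mask"
// arithmetic when advancing indices.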
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}
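

// Drains grey objects from the deque until the given byte budget is used up
// and returns the number of bytes actually scanned. For large objects that
// are scanned via the progress bar, only the freshly scanned part counts
// against the budget (tracked in unscanned_bytes_of_large_object_).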
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    int delta = (size - unscanned_bytes_of_large_object_);
    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
    CHECK_LT(0, delta);
    bytes_processed += delta;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as the mutator
    // is stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}
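

// Abandons the current incremental cycle: the write barrier and the
// RecordWrite stubs are reset, and pages already flagged for rescanning
// during evacuation are cleared again.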
void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}
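

// Heuristics that grow marking_speed_, i.e. the amount of marking work done
// per allocated byte. Marking accelerates when a fixed number of steps has
// elapsed, when old space is running low, when the heap has grown
// substantially during marking, or when the marker is not keeping up with
// the allocation rate.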
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}
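

// Performs one increment of marking work. The byte budget is marking_speed_
// times the maximum of bytes allocated and write barriers invoked since the
// last step; while the sweeper is still running, the step finishes sweeping
// before any marking happens.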
void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
                              bool force_marking) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (!force_marking && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since the last step directly to determine the amount
    // of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;
    intptr_t bytes_processed = 0;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          heap_->mark_compact_collector()->IsSweepingCompleted()) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (marking_deque_.IsEmpty()) MarkingComplete(action);
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
}
}  // namespace v8::internal