deps: backport IsValid changes from 4e8736d in V8
platform/upstream/nodejs.git: deps/v8/src/heap/incremental-marking.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      weak_closure_was_overapproximated_(false),
      weak_closure_approximation_rounds_(0),
      request_type_(COMPLETE_MARKING),
      gc_callback_flags_(kNoGCCallbackFlags) {}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

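  // If the chunk's write barrier counter has dropped below half of
  // kWriteBarrierCounterGranularity, credit the consumed portion to
  // write_barriers_invoked_since_last_step_ (which feeds the step size
  // heuristic in Step()) and reset the chunk's counter.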
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

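// Turns the object grey without pushing it onto the marking deque. If the
// object was already black, its size is subtracted from the chunk's live byte
// count, since it will be counted again when it is marked black later.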
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

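// Marks a grey or white object black and accounts for its size in the live
// bytes of its chunk; objects that are already black are left unchanged.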
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->marking_deque()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};

void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
  return CanBeActivated() &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  // Don't switch on for very small heaps.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}

void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(int mark_compact_flags,
                               const GCCallbackFlags gc_callback_flags,
                               const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  gc_callback_flags_ = gc_callback_flags;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    heap_->mark_compact_collector()->SetFlags(mark_compact_flags);
    StartMarking();
    heap_->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

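// Marks implicit reference groups and object groups reachable from the roots
// as part of over-approximating the weak closure. Progress is measured by how
// far the marking deque's top moved; after too many rounds or too little
// progress, the closure is considered over-approximated.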
void IncrementalMarking::MarkObjectGroups() {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  ++weak_closure_approximation_rounds_;
  if ((weak_closure_approximation_rounds_ >=
       FLAG_max_object_groups_marking_rounds) ||
      (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
    weak_closure_was_overapproximated_ = true;
  }

  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}

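// After a scavenge, entries in the marking deque that point into new space are
// rewritten to the forwarding addresses of objects that survived; entries for
// objects that did not survive, as well as one-word fillers, are dropped.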
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}

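// Drains the marking deque until it is empty or roughly bytes_to_process bytes
// have been visited. Returns the number of bytes actually processed; for large
// objects scanned with a progress bar, only the portion scanned in this step
// is counted.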
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
  }
  request_type_ = OVERAPPROXIMATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  weak_closure_was_overapproximated_ = false;
  weak_closure_approximation_rounds_ = 0;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    Start(Heap::kNoGCFlags, kNoGCCallbackFlags, "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

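// Heuristically increases marking_speed_ when marking risks falling behind:
// periodically after every kMarkingSpeedAccellerationInterval steps, when
// little old-generation headroom remains, when the old generation has grown
// substantially since marking started, or when promotion outpaces scanning.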
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

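// Performs one incremental marking step. The amount of work is marking_speed_
// times the larger of the bytes allocated and the write barriers invoked since
// the last step. During the SWEEPING state the step only tries to finish
// concurrent sweeping and then switches to MARKING; once the deque is empty,
// marking either completes or over-approximates the weak closure.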
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !heap_->concurrent_sweeping_enabled())) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (FLAG_overapproximate_weak_closure &&
              !weak_closure_was_overapproximated_) {
            OverApproximateWeakClosure(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8