deps: update v8 to 4.3.61.21
deps/v8/src/heap/incremental-marking.cc (platform/upstream/nodejs.git)
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      weak_closure_was_overapproximated_(false),
      weak_closure_approximation_rounds_(0),
      request_type_(COMPLETE_MARKING) {}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}


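// Write barrier entry point called from generated code. When the chunk's
// write barrier counter has dropped below half of its granularity, the spent
// portion is credited to write_barriers_invoked_since_last_step_ and the
// counter is reset; Step() uses that tally (together with bytes allocated)
// to size the next marking step.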
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


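// Marks the object grey without pushing it onto the marking deque, so it is
// not scanned now; callers blacken it in a separate pass when marking
// finishes. If the object was already black, its size is subtracted from the
// page's live byte count so that the later blackening does not count it
// twice.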
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


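// Helpers for blackening objects during incremental marking. The first
// variant only transitions white objects (grey and black stay as they are);
// the second also promotes grey objects to black. Both add the object's size
// to the page's live byte count when they perform a transition.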
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


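// Marking visitor used while incremental marking is running. Every visited
// pointer is recorded as a slot for the compactor and the referenced object
// is marked and, if needed, pushed onto the marking deque. Large fixed arrays
// in large-object space are scanned in kProgressBarScanningChunk-sized pieces
// using the page's progress bar so a single huge array cannot monopolize a
// marking step.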
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


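// Page flag maintenance for incremental marking: while marking is active,
// old-space pages are flagged as having interesting pointers so that the
// write barrier takes its slow path for stores into and out of them.
// Large-object pages bigger than a normal page are additionally flagged for
// rescan on evacuation when compacting, since slots recorded for them are
// hard to filter out later.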
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() &&
         heap_->NextGCIsLikelyToBeFull(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we don't
    // need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


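// Switches every RecordWrite stub in the code-stub dictionary to the given
// mode so that generated code runs the write barrier variant matching the
// current marking state.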
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


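// One round of weak-closure overapproximation: marks objects reachable
// through implicit reference groups and object groups. The closure is
// considered overapproximated once the configured number of rounds has run
// or a round makes too little progress on the marking deque.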
void IncrementalMarking::MarkObjectGroups() {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  ++weak_closure_approximation_rounds_;
  if ((weak_closure_approximation_rounds_ >=
       FLAG_max_object_groups_marking_rounds) ||
      (marking_progress < FLAG_min_progress_during_object_groups_marking)) {
    weak_closure_was_overapproximated_ = true;
  }

  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


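// After a scavenge, compacts the marking deque in place: surviving new-space
// entries are replaced by their forwarding addresses, entries that did not
// survive are dropped, and one-word fillers left behind by in-place array
// shifts are skipped.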
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear in the marking deque
      // when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (mark_bit.data_only()) {
    MarkBlackOrKeepGrey(obj, mark_bit, obj->Size());
  } else if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


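// Pops objects off the marking deque and visits them until roughly
// bytes_to_process bytes of object bodies have been scanned. Large objects
// that are only partially scanned via the progress bar count only for the
// portion that was actually visited (see unscanned_bytes_of_large_object_).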
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


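// Finishes marking in one go, without yielding back to the mutator: drains
// the whole marking deque, then blackens the caches (the polymorphic code
// cache and the per-context normalized map caches) that were deliberately
// only marked grey during incremental marking.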
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black, as the mutator
    // is stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::OverApproximateWeakClosure(CompletionAction action) {
  DCHECK(FLAG_overapproximate_weak_closure);
  DCHECK(!weak_closure_was_overapproximated_);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] requesting weak closure overapproximation.\n");
  }
  request_type_ = OVERAPPROXIMATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  weak_closure_was_overapproximated_ = false;
  weak_closure_approximation_rounds_ = 0;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play it safe for now, but compaction should in
    // principle be possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


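// Heuristic throttle for the marking pace: marking_speed_ is raised
// periodically (every kMarkingSpeedAccellerationInterval steps), when
// old-generation headroom is nearly exhausted, when the heap has grown by a
// marking_speed_-dependent factor since marking started, or when scanning
// has not kept up with the amount of memory promoted during marking.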
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}


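// Performs a single incremental marking step. The work budget is
// marking_speed_ times the larger of the bytes allocated and the write
// barriers invoked since the last step. While sweeping, the step only tries
// to finish sweeping and then switches to marking; while marking, it drains
// part of the deque and, once the deque is empty and completion is allowed,
// either requests weak-closure overapproximation or signals that marking is
// complete.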
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since the last step directly to determine the amount
    // of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !heap_->concurrent_sweeping_enabled())) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (FLAG_overapproximate_weak_closure &&
              !weak_closure_was_overapproximated_) {
            OverApproximateWeakClosure(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}
}  // namespace v8::internal