// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                Object** slot,
                                                Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

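// Visitor used by incremental marking steps to mark through the objects
// popped off the marking deque: white heap objects are greyed and pushed,
// while slots and relocation entries are recorded with the mark-compact
// collector so they can be updated if their targets are evacuated.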
class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingMarkingVisitor(Heap* heap,
                                   IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_age() != heap_->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitCodeEntry(Address entry_address) {
    Object* target = Code::GetObjectFromEntryAddress(entry_address);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(entry_address, Code::cast(target));
    MarkObject(target);
  }

  void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
    if (shared->ic_age() != heap_->global_ic_age()) {
      shared->ResetForNewContext(heap_->global_ic_age());
    }
  }

  void VisitPointer(Object** p) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(obj);
    }
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(obj);
      }
    }
  }

 private:
  // Mark the heap object pointed to by obj.
  INLINE(void MarkObject(Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};

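// Visitor used to colour the strong roots grey when incremental marking
// starts.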
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};

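// Adjust the write-barrier-related flags of an old-space or large-object
// chunk. While marking is active both incoming and outgoing pointers are
// interesting; otherwise only outgoing pointers are tracked, and cell-space
// or scan-on-scavenge pages need neither flag.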
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

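// Incremental marking is only worth starting once the old generation has
// grown beyond a small threshold and neither the serializer nor the
// --expose-gc flag is active.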
bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
    // there is nothing to do if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

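// Walk the code-stub cache and switch every RecordWrite stub to the given
// mode (store-buffer-only, incremental, or incremental compaction).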
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

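// The marking deque is backed by a 4 MB virtual memory reservation that is
// committed lazily when marking starts and uncommitted again once marking
// has stopped.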
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}

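// Force an object grey without pushing it onto the marking deque. Used for
// caches that are marked black in a separate pass once marking finishes.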
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

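// Transition to the MARKING state: decide whether to compact, patch the
// RecordWrite stubs, set up the marking deque, activate the incremental
// write barrier and grey the strong roots.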
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

#ifdef DEBUG
  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

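// Clear the mark bits of the from-space pages so that no stale mark bits
// survive the scavenge.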
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

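// After a scavenge, deque entries may point to objects that have been moved.
// Rewrite entries that point into new space to the forwarding addresses of
// their targets and drop entries for one-word fillers.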
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}

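// Global contexts have both strong and weak fields: visit the strong part
// through the MarkCompactBodyDescriptor and only record the weak slots for
// the compactor without marking through them.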
void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
  v->VisitPointers(
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));

  MarkCompactCollector* collector = heap_->mark_compact_collector();
  for (int idx = Context::FIRST_WEAK_SLOT;
       idx < Context::GLOBAL_CONTEXT_SLOTS;
       ++idx) {
    Object** slot =
        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
    collector->RecordSlot(slot, slot, *slot);
  }
}

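// Finish marking synchronously: drain the whole marking deque and then
// blacken the polymorphic code cache and the normalized map caches without
// scanning them.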
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black, as the mutator
    // is stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        VisitGlobalContext(Context::cast(obj), &marking_visitor);
      } else {
        obj->Iterate(&marking_visitor);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

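// Perform one incremental step. The amount of sweeping or marking work is
// proportional to the bytes allocated since the last step, scaled by
// allocation_marking_factor_, which the heuristics below increase whenever
// the marker appears to be falling behind the mutator.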
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      if (obj->IsMap()) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);
      }

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark the cache black with a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        VisitGlobalContext(ctx, &marking_visitor);
      } else {
        obj->IterateBody(map->instance_type(), size, &marking_visitor);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

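  // Speed-up heuristics: increase allocation_marking_factor_ periodically,
  // when little old-generation space is left, when the heap has grown
  // substantially during marking, or when the marker is not keeping up with
  // the rate of promotion.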
  bool speed_up = false;

  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking after %d steps\n",
             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintF("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
}

} }  // namespace v8::internal