[V8] Introduce a QML compilation mode
profile/ivi/qtjsbackend.git / src/3rdparty/v8/src/incremental-marking.cc
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {

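// Incremental marking spreads the tri-color marking of the old generation
// over many small steps interleaved with the mutator, instead of marking
// the whole heap in one stop-the-world pause.  White objects are unmarked,
// grey objects are marked and waiting on the marking deque to be scanned,
// and black objects are marked and fully scanned.  The RecordWrite*
// functions below implement the write barrier that keeps this invariant
// intact while the mutator runs.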
IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}

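// Slow path of the incremental write barrier.  While compacting, a store
// into an object that is already black must be recorded explicitly,
// because black objects are not rescanned before evacuation.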
void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}

void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}

void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white.  It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned.  We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}

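// Visitor that performs the actual marking work: every pointer it
// encounters is recorded as a slot for the compactor, and the referenced
// object is marked via MarkObject() below.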
class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingMarkingVisitor(Heap* heap,
                                   IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_age() != heap_->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitCodeEntry(Address entry_address) {
    Object* target = Code::GetObjectFromEntryAddress(entry_address);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(entry_address, Code::cast(target));
    MarkObject(target);
  }

  void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
    if (shared->ic_age() != heap_->global_ic_age()) {
      shared->ResetForNewContext(heap_->global_ic_age());
    }
  }

  void VisitPointer(Object** p) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(obj);
    }
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(obj);
      }
    }
  }

 private:
  // Mark the object pointed to by obj: data-only objects can be marked
  // black immediately, everything else is pushed grey onto the deque.
  INLINE(void MarkObject(Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};

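// Visitor used to mark the strong roots grey when marking starts; the
// objects reachable from the roots are then scanned incrementally off
// the marking deque.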
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};

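// Set up the per-page flags consulted by the record-write stubs.  While
// marking is on, pointers both to and from old-space pages are
// interesting; while it is off, only pointers from old space into new
// space need to be caught (by the store buffer).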
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}

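// Incremental marking only pays off once the old generation has grown
// past a threshold; below that a non-incremental collection is cheap
// enough.  It is also disabled when the GC is exposed to JavaScript and
// while a snapshot is being serialized.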
bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // The stub is initially generated in STORE_BUFFER_ONLY mode, so
    // nothing needs to be done if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}

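// Walk the code-stub cache and switch every RecordWrite stub to the given
// mode so that the generated write barriers match the collector state.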
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

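// The marking deque lives in a 4 MB virtual memory reservation that is
// committed lazily when marking starts and decommitted once marking has
// stopped, so idle isolates do not pay for it.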
void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}

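// Entry point.  If lazy sweeping of the previous collection has already
// finished we can start marking immediately; otherwise we enter the
// SWEEPING state and let Step() drive the sweepers to completion first.
// Lowering the new-space inline allocation limit makes allocation call
// back into the runtime periodically, which is what triggers the steps.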
void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}

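// Transition into the MARKING state: decide whether this cycle will also
// compact, patch the record-write stubs accordingly, turn on the
// incremental write barrier, and seed the marking deque by greying the
// strong roots.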
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

#ifdef DEBUG
  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black in a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

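// A scavenge moves new-space objects while grey entries for them may
// still be on the marking deque.  Rebuild the deque in place: follow
// forwarding pointers for survivors, and drop entries for one-word
// fillers and for objects that died in the scavenge.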
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}

void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
  v->VisitPointers(
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));

  MarkCompactCollector* collector = heap_->mark_compact_collector();
  for (int idx = Context::FIRST_WEAK_SLOT;
       idx < Context::GLOBAL_CONTEXT_SLOTS;
       ++idx) {
    Object** slot =
        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
    collector->RecordSlot(slot, slot, *slot);
  }
}

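// Drain the marking deque in one go, marking every popped object black.
// This is used (see Finalize() below) when the rest of the GC is about
// to be performed, so there is no point in staying incremental.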
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black, because the
    // mutator is stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one-word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        VisitGlobalContext(Context::cast(obj), &marking_visitor);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through the map's
        // transitions and back pointers in a special way to make these links
        // weak.  Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          marking_visitor.VisitPointers(
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        obj->Iterate(&marking_visitor);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}

void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}

void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now.  This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context).  If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental;
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

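// The heart of incremental marking.  Step() is called from allocation
// paths once kAllocatedThreshold bytes have been allocated; the marking
// work done per step is the number of allocated bytes multiplied by
// allocation_marking_factor_, so the marker keeps pace with a multiple
// of the allocation rate.  The heuristics at the end of this function
// raise the factor whenever the marker threatens to fall behind.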
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one-word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark the cache black in a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        VisitGlobalContext(ctx, &marking_visitor);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through the map's
        // transitions and back pointers in a special way to make these links
        // weak.  Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          marking_visitor.VisitPointers(
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else if (map->instance_type() == JS_FUNCTION_TYPE) {
        marking_visitor.VisitPointers(
            HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
            HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));

        marking_visitor.VisitCodeEntry(
            obj->address() + JSFunction::kCodeEntryOffset);

        marking_visitor.VisitPointers(
            HeapObject::RawField(obj,
                                 JSFunction::kCodeEntryOffset + kPointerSize),
            HeapObject::RawField(obj,
                                 JSFunction::kNonWeakFieldsEndOffset));
      } else {
        obj->IterateBody(map->instance_type(), size, &marking_visitor);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking after %d steps\n",
             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintF("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}

void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal