// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


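// Slow path of the incremental write barrier. BaseRecordWrite handles the
// marking side of the barrier; in addition, during incremental compaction
// we record the slot for black objects, because they will not be rescanned
// and the compactor would otherwise miss the updated pointer.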
void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned, so we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


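// Visitor used while incrementally marking the body of a single object.
// Every pointer it encounters is recorded as a slot for the compactor, and
// the pointee is marked grey and pushed onto the marking deque, or marked
// black immediately if its page holds only data-only objects.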
class IncrementalMarkingMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingMarkingVisitor(Heap* heap,
                                   IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
        && (target->ic_age() != heap_->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(target);
  }

  void VisitCodeEntry(Address entry_address) {
    Object* target = Code::GetObjectFromEntryAddress(entry_address);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(entry_address, Code::cast(target));
    MarkObject(target);
  }

  void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
    if (shared->ic_age() != heap_->global_ic_age()) {
      shared->ResetForNewContext(heap_->global_ic_age());
    }
  }

  void VisitPointer(Object** p) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap_->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(obj);
    }
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap_->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(obj);
      }
    }
  }

 private:
  // Mark object pointed to by p.
  INLINE(void MarkObject(Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};


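// Visitor used to mark the strong roots grey when incremental marking
// starts. Unlike the body visitor above, it only marks the objects the
// roots point to; the root locations themselves are not recorded as slots.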
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};


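// The incremental write barrier is switched on and off via page flags:
// POINTERS_TO_HERE_ARE_INTERESTING and POINTERS_FROM_HERE_ARE_INTERESTING
// are consulted by the record-write stubs, and RESCAN_ON_EVACUATION marks
// large-object pages whose slots are too expensive to track individually
// during incremental compaction.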
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


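// Generated RecordWrite stubs start out in STORE_BUFFER_ONLY mode. A stub
// installed while incremental marking (and possibly compaction) is already
// running has to be patched to the matching mode immediately.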
void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


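// Entry point: starts incremental marking, or, if sweeping of the old
// spaces has not finished yet, enters the SWEEPING state and lets Step()
// advance the sweepers until marking proper can begin.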
void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      // The object was already counted as live; undo that before demoting
      // it from black back to grey.
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


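// Transition into the MARKING state: decide whether this cycle may compact,
// patch the RecordWrite stubs, set up the marking deque, turn on the
// incremental write barrier and mark the strong roots grey.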
void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


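// A scavenge moves new-space objects, so entries already sitting on the
// marking deque may point at stale addresses. Compact the deque in place:
// forwarded new-space objects are replaced by their new locations, objects
// that did not survive are dropped, and one-word fillers are skipped.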
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the marking stack
      // when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
}


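// Global contexts are visited specially: the strong part of the context is
// scanned with the given visitor, while the weak slots are only recorded
// for the compactor so that the mark-compact collector can process them
// later.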
void IncrementalMarking::VisitGlobalContext(Context* ctx, ObjectVisitor* v) {
  v->VisitPointers(
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kStartOffset),
      HeapObject::RawField(
          ctx, Context::MarkCompactBodyDescriptor::kEndOffset));

  MarkCompactCollector* collector = heap_->mark_compact_collector();
  for (int idx = Context::FIRST_WEAK_SLOT;
       idx < Context::GLOBAL_CONTEXT_SLOTS;
       ++idx) {
    Object** slot =
        HeapObject::RawField(ctx, FixedArray::OffsetOfElementAt(idx));
    collector->RecordSlot(slot, slot, *slot);
  }
}


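// Hurry drains the marking deque in one go instead of marking in small
// steps, and then blackens the polymorphic code cache and the normalized
// map caches that were deliberately kept grey during marking.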
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black, as the mutator
    // is stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one-word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        VisitGlobalContext(Context::cast(obj), &marking_visitor);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through the map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          marking_visitor.VisitPointers(
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        obj->Iterate(&marking_visitor);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


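// Abort tears incremental marking down without finishing it: the write
// barrier and stub patches are undone and any RESCAN_ON_EVACUATION flags
// set for large objects are cleared.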
void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


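// Step is the heart of the incremental marker and is driven by allocation.
// Once roughly kAllocatedThreshold bytes have been allocated, we scan
// allocated_ * allocation_marking_factor_ bytes of grey objects (or advance
// the sweepers while still in the SWEEPING state), so marking speed is
// proportional to the allocation rate and can be scaled up via the factor.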
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    IncrementalMarkingMarkingVisitor marking_visitor(heap_, this);
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one-word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark cache black with a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        VisitGlobalContext(ctx, &marking_visitor);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through the map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          marking_visitor.VisitPointers(
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else if (map->instance_type() == JS_FUNCTION_TYPE) {
        marking_visitor.VisitPointers(
            HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
            HeapObject::RawField(obj, JSFunction::kCodeEntryOffset));

        marking_visitor.VisitCodeEntry(
            obj->address() + JSFunction::kCodeEntryOffset);

        marking_visitor.VisitPointers(
            HeapObject::RawField(obj,
                                 JSFunction::kCodeEntryOffset + kPointerSize),
            HeapObject::RawField(obj,
                                 JSFunction::kNonWeakFieldsEndOffset));
      } else {
        obj->IterateBody(map->instance_type(), size, &marking_visitor);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;
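
  // Heuristics for speeding marking up: the factor is raised periodically,
  // whenever old-space headroom is running low, whenever the old generation
  // has grown substantially since marking started, and whenever the marker
  // is not keeping up with the promotion rate. The factor is only actually
  // raised once marking (as opposed to sweeping) has started.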
  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking after %d steps\n",
             static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintF("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintF("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintF("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintF("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal