// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {
// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);
// Forward declarations.
class MarkCompactCollector;
class RootMarkingVisitor;

class Marking {
 public:
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
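
// A minimal sketch (illustrative, not part of this header) of the two-bit
// color transitions defined above, starting from a white (00) object:
//
//   MarkBit mark_bit = Marking::MarkBitFrom(obj);
//   DCHECK(Marking::IsWhite(mark_bit));  // 00: not yet visited.
//   Marking::WhiteToGrey(mark_bit);      // 11: discovered, pending scan.
//   DCHECK(Marking::IsGrey(mark_bit));
//   Marking::GreyToBlack(mark_bit);      // 10: visited and fully scanned.
//   DCHECK(Marking::IsBlack(mark_bit));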


// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -
            1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room, otherwise
  // mark the object as overflowed and wait for a rescan of the heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    // TODO(jochen): Remove again before we branch for 4.2.
    CHECK(object->IsHeapObject() && object->map()->IsMap());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    // TODO(jochen): Remove again before we branch for 4.2.
    CHECK(object->IsHeapObject() && object->map()->IsMap());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when ((top_ + 1) & mask_) ==
  // bottom_.
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
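
// Illustrative note (not part of this header): top_, bottom_, and mask_
// implement a power-of-two ring buffer, so wrap-around needs no division.
// With mask_ == 7 (an 8-slot deque):
//
//   top_ = 7;
//   top_ = ((top_ + 1) & mask_);  // top_ == 0: wrapped to the start.
//
// IsFull() deliberately leaves one slot unused so that the full state
// (((top_ + 1) & mask_) == bottom_) remains distinguishable from the empty
// state (top_ == bottom_).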

class SlotsBuffer;


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {}

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
#ifdef DEBUG
    if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
      DCHECK_NOT_NULL(*slot);
    }
#endif
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }
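
  // Worked example (illustrative): for a chain of three buffers whose head
  // has idx_ == 5, SizeOfChain returns 5 + (3 - 1) * 1021 == 2047, since
  // every buffer except the head is completely full.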

  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  // Eliminates all stale entries from the slots buffer, i.e., slots that
  // are no longer part of live objects. This method must be called after
  // marking, when the whole transitive closure is known, and before
  // sweeping, while mark bits are still intact.
  static void RemoveInvalidSlots(Heap* heap, SlotsBuffer* buffer);

  // Ensures that there are no invalid slots in the chain of slots buffers.
  static void VerifySlots(Heap* heap, SlotsBuffer* buffer);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
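
// Illustrative sketch (not part of this header) of the zero-page assumption
// described in the class comment: while iterating a buffer, an element that
// compares below NUMBER_OF_SLOT_TYPES cannot be a mapped slot address, so it
// must be the SlotType tag opening a (SlotType, slot address) pair.
//
//   SlotsBuffer::ObjectSlot entry = /* next element of the buffer */;
//   if (entry < reinterpret_cast<SlotsBuffer::ObjectSlot>(
//                   SlotsBuffer::NUMBER_OF_SLOT_TYPES)) {
//     // Typed slot: entry is the tag; the next element is the address.
//   } else {
//     // Untyped slot: entry itself is the address of a tagged pointer.
//   }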


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
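
// Illustrative note (not part of this header): candidate lists are threaded
// through the candidates themselves instead of separate list nodes. For a
// JSFunction the link is its next_function_link field; for a
// SharedFunctionInfo it is stashed in the gc_metadata slot of its code
// object. A hypothetical usage sequence:
//
//   CodeFlusher flusher(isolate);
//   flusher.AddCandidate(shared_info);  // Links shared_info in front of the
//                                       // previous list head.
//   flusher.ProcessCandidates();        // After marking: flush or reset.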


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    PARALLEL_SWEEPING,
    CONCURRENT_SWEEPING,
    SEQUENTIAL_SWEEPING
  };

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, so we have to rescan the page after evacuation
    // to discover and update all pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes is zero, then the
  // whole given space is swept. Returns the size of the largest contiguous
  // freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently with the sweeper threads. Returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // Returns true if the sweeper threads are done processing the pages. If
  // sweeper threads are not active, this method returns true as well; if
  // that turns out to be a latency issue, we should be smarter here.
  bool IsSweepingCompleted();
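
  // Illustrative (hypothetical) call pattern for SweepInParallel above: a
  // free-list allocation that comes up short can sweep just enough to
  // satisfy a single request.
  //
  //   // Try to free at least size_in_bytes; passing 0 sweeps the space.
  //   int max_freed = SweepInParallel(space, size_in_bytes);
  //   if (max_freed >= size_in_bytes) { /* retry the allocation */ }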

  void RefillFreeList(PagedSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  void EnsureMarkingDequeIsCommittedAndInitialize();

  void InitializeMarkingDeque();

  void UncommitMarkingDeque();

  // The following four methods can only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

 private:
  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);
  void ClearInvalidSlotsBufferEntries(PagedSpace* space);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool evacuation_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause, including the following:
  //    - Processing of objects reachable through Harmony WeakMaps.
  //    - Objects reachable due to host application logic like object groups
  //      or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();
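
  // Illustrative sketch of the overflow protocol formed by the two methods
  // above (the actual driver loop lives in the .cc file):
  //
  //   EmptyMarkingDeque();
  //   while (marking_deque_.overflowed()) {
  //     RefillMarkingDeque();  // Re-discover overflowed grey objects.
  //     EmptyMarkingDeque();   // Drain again; may overflow once more.
  //   }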

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map, Map* dead_transition);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ProcessAndClearWeakCells();
  void AbortWeakCells();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuating all live objects and updating pointers.

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
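
// Minimal usage sketch (illustrative, assuming a valid MemoryChunk* chunk):
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     Address cell_base = it.CurrentCellBase();
//     MarkBit::CellType* cell = it.CurrentCell();
//     // Each cell covers 32 mark bits, i.e. the 32 * kPointerSize bytes of
//     // the chunk that start at cell_base.
//   }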


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
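
// Usage sketch (illustrative): EvacuationScope flips the collector's
// evacuation flag for the duration of a block, RAII-style:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true while evacuating pages here.
//   }  // The destructor resets the flag to false.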


const char* AllocationSpaceName(AllocationSpace space);
}
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_