// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"
// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates *offset to the
// first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
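// Illustrative sketch only (not part of this header): a callback matching the
// IsAliveFunction signature might look like the following, where the helper
// name is hypothetical and marking state is read via the Marking class
// declared below.
//
//   static bool ExampleIsAlive(HeapObject* obj, int* size, int* offset) {
//     if (Marking::IsWhite(Marking::MarkBitFrom(obj))) return false;
//     *size = obj->Size();
//     return true;  // *offset may additionally be updated by some callers.
//   }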
// Forward declarations.
class MarkCompactCollector;
class RootMarkingVisitor;
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Next().Clear();

  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {

  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
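  // Summary of the two-bit color encoding implied by the helpers above
  // (first bit, second bit):
  //   white      00  - not yet visited
  //   grey       11  - visited, children not yet scanned
  //   black      10  - visited, children scanned (required by the sweeper)
  //   impossible 01  - must never occur
  // Each transition helper flips at most one of the two bits, e.g.
  // BlackToGrey sets the second bit (10 -> 11) and GreyToBlack clears it
  // (11 -> 10).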
  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));

  INLINE(static void AnyToGrey(MarkBit markbit)) {

  void TransferMark(Address old_start, Address new_start);

  static const char* ColorName(ObjectColor color) {
      case IMPOSSIBLE_COLOR:

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    return IMPOSSIBLE_COLOR;
  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      is_black = true;  // Looks black so far.
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        is_black = false;  // Was actually gray.
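  // Behavior sketch for TransferColor (derived from the comment above and the
  // color encoding): a white 'from' leaves 'to' white and returns false, a
  // grey 'from' makes 'to' grey and returns false, and a black 'from' makes
  // 'to' black and returns true.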
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }
  // Push the (marked) object on the marking stack if there is room, otherwise
  // mark the object as overflowed and wait for a rescan of the heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    // TODO(jochen): Remove again before we branch for 4.2.
    CHECK(object->IsHeapObject() && object->map()->IsMap());
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    // TODO(jochen): Remove again before we branch for 4.2.
    CHECK(object->IsHeapObject() && object->map()->IsMap());
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);

  INLINE(HeapObject* Pop()) {
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when ((top_ + 1) & mask_) == bottom_.
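  // Worked example with hypothetical values: if mask_ == 7 the deque can hold
  // at most 7 objects; with bottom_ == 5 and top_ == 4 we have
  // ((top_ + 1) & mask_) == bottom_, so IsFull() is true and the live entries
  // wrap around the end of the backing array (indices 5, 6, 7, 0, 1, 2, 3).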
  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);


class SlotsBufferAllocator {
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//  - Untyped slots are expected to contain a tagged object pointer.
//    They are recorded by an address.
//  - Typed slots are expected to contain an encoded pointer to a heap
//    object where the way of encoding depends on the type of the slot.
//    They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
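// Illustrative buffer layout with hypothetical contents: after recording one
// untyped slot and one typed slot, the buffer would hold
//   slots_[0] = <address of an Object** slot>       (untyped)
//   slots_[1] = CODE_TARGET_SLOT                    (< NUMBER_OF_SLOT_TYPES)
//   slots_[2] = <address the typed slot refers to>
// so during iteration any element smaller than NUMBER_OF_SLOT_TYPES starts a
// (SlotType, address) pair and everything else is an untyped slot address.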
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
      chain_length_ = next_->chain_length_ + 1;

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
    if (slot >= reinterpret_cast<ObjectSlot>(NUMBER_OF_SLOT_TYPES)) {
      DCHECK_NOT_NULL(*slot);
    slots_[idx_++] = slot;

    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,

  static const char* SlotTypeToString(SlotType type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    return "UNKNOWN SlotType";

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
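  // Worked example with hypothetical values: for a chain of three buffers
  // whose head holds idx_ == 10 entries, SizeOfChain returns
  // 10 + (3 - 1) * kNumberOfElements == 10 + 2 * 1021 == 2052.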
  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
        buffer->UpdateSlots(heap);
      buffer = buffer->next();

  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,

  static const int kNumberOfElements = 1021;

  static const int kChainLengthThreshold = 15;

  intptr_t chain_length_;

  ObjectSlot slots_[kNumberOfElements];
// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//  - SharedFunctionInfo references unoptimized code.
//  - JSFunction references either unoptimized or optimized code.
//  - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
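// Illustrative sketch of the candidate bookkeeping (hypothetical state): the
// candidate lists are threaded through fields of the candidates themselves
// rather than through separate storage, e.g.
//   jsfunction_candidates_head_ -> f1 -> f2 -> undefined
// with each link stored in the function's next_function_link field, and
//   shared_function_info_candidates_head_ -> s1 -> s2 -> NULL
// with each link stored in the candidate's code()->gc_metadata() slot, as the
// accessors below show.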
  explicit CodeFlusher(Isolate* isolate)
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();

  void IteratePointersToFromSpace(ObjectVisitor* v);

  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);

  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
  // Sets the global flags; must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.

  // Performs a global garbage collection.
  void CollectGarbage();
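  // High-level outline of a full collection (illustrative only; it strings
  // together methods declared elsewhere in this class):
  //   MarkLiveObjects();               // Phase 1: mark reachable objects.
  //   ClearNonLiveReferences();        // Kill dead map transitions etc.
  //   ClearWeakCollections();          // Drop entries with unreachable keys.
  //   EvacuateNewSpaceAndCandidates(); // Phase 2: sweep/evacuate and update
  //                                    // recorded slots.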
  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();
  // Checks whether a mark-compact collection is currently being performed.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }

  // Determines the type of the object and emits a deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();
    // We were not collecting slots on this page that point
    // to other evacuation candidates, thus we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }
  // Concurrent and parallel sweeping support. If required_freed_bytes is set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is zero, then
  // the whole given space is swept. It returns the size of the largest
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
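  // Usage sketch (illustrative; the space pointer and byte count are
  // hypothetical):
  //   // Free at least 4 KB from the given space, or pass 0 to sweep it all.
  //   int freed = collector->SweepInParallel(space, 4 * KB);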
  // Sweeps a given page concurrently to the sweeper threads. It returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // If sweeper threads are not active, this method will return true. If
  // this is a latency issue, we should be smarter here. Otherwise, it will
  // return true if the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);
  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  void EnsureMarkingDequeIsCommittedAndInitialize();

  void InitializeMarkingDeque();

  void UncommitMarkingDeque();

  void OverApproximateWeakClosure();

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

  enum CollectorState {
    ENCODE_FORWARDING_ADDRESSES,

  // The current stage of the collector.
  CollectorState state_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;
  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.

  bool was_marked_incrementally_;
  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.
  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);
  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();
  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();
  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause, including the following:
  //  - Processing of objects reachable through Harmony WeakMaps.
  //  - Objects reachable due to host application logic like object groups
  //    or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);
  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();
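  // Taken together, a marking round is typically driven by a loop of the
  // following shape (illustrative sketch of how the two methods cooperate):
  //   EmptyMarkingDeque();
  //   while (marking_deque_.overflowed()) {
  //     RefillMarkingDeque();
  //     EmptyMarkingDeque();
  //   }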
  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);
  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();
  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ProcessAndClearWeakCells();
  void AbortWeakCells();
  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);

  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;
class MarkBitCellIterator BASE_EMBEDDED {
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));

  inline void Advance() {
    cell_base_ += 32 * kPointerSize;

  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
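  // Typical iteration pattern over a chunk's mark bits (illustrative sketch):
  //   MarkBitCellIterator it(chunk);
  //   for (; !it.Done(); it.Advance()) {
  //     Address cell_base = it.CurrentCellBase();
  //     MarkBit::CellType* cell = it.CurrentCell();
  //     // Each cell covers 32 mark bits, i.e. 32 * kPointerSize bytes of the
  //     // chunk starting at cell_base.
  //   }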
class EvacuationScope BASE_EMBEDDED {
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);

  ~EvacuationScope() { collector_->set_evacuation(false); }

  MarkCompactCollector* collector_;
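  // Usage sketch (illustrative): the scope toggles the collector's evacuation
  // flag for the duration of a block.
  //   {
  //     EvacuationScope evacuation_scope(collector);
  //     // collector->evacuation() is true here.
  //   }  // Reset to false when the scope is left.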
const char* AllocationSpaceName(AllocationSpace space);
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_