// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates the offset
// to the first live object on the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class MarkCompactCollector;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }
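
  // Informal summary of the two-bit color encoding used above (the first bit
  // is the mark bit itself, the second bit is MarkBit::Next()):
  //   white      = 00  (unmarked)
  //   grey       = 11  (marked, still to be scanned)
  //   black      = 10  (marked and scanned)
  //   impossible = 01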

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  void TransferMark(Address old_start, Address new_start);

  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    return IMPOSSIBLE_COLOR;
  }

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        is_black = false;  // Was actually grey.
      }
    }
    return is_black;
  }
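
  // Illustration (informal) of the cases handled by TransferColor above,
  // assuming |to| starts out white:
  //   |from| is white (00) -> |to| stays white,   returns false
  //   |from| is grey  (11) -> |to| becomes grey,  returns false
  //   |from| is black (10) -> |to| becomes black, returns true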

 private:
  Heap* heap_;
};


// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -
            1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room, otherwise
  // mark the object as overflowed and wait for a rescan of the heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }
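
  // Note on the overflow path of PushBlack above: the black object is
  // reverted to grey and its size is subtracted from the page's live-byte
  // count before the deque is flagged as overflowed, so that it can be picked
  // up again during the later rescan of the heap.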

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
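
  // Worked example (illustrative): with mask_ == 7 the backing array has
  // eight slots, but at most seven objects can be queued, because
  // top_ + 1 == bottom_ (mod 8) is treated as full; this keeps the full and
  // empty states distinguishable.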

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


class SlotsBuffer;


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that have to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
// - Untyped slots are expected to contain a tagged object pointer.
//   They are recorded as an address.
// - Typed slots are expected to contain an encoded pointer to a heap
//   object where the way of encoding depends on the type of the slot.
//   They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
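//
// A minimal iteration sketch based on the description above (illustrative
// only, not the actual iteration code):
//
//   for (int i = 0; i < idx_; i++) {
//     ObjectSlot slot = slots_[i];
//     if (reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES) {
//       // Typed slot: this element encodes the SlotType, the next element
//       // is the address the type applies to.
//       SlotType type =
//           static_cast<SlotType>(reinterpret_cast<uintptr_t>(slot));
//       Address addr = reinterpret_cast<Address>(slots_[++i]);
//       // ... update the slot according to |type| ...
//     } else {
//       // Untyped slot: |slot| holds a tagged object pointer to update.
//     }
//   }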
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }
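
  // SizeOfChain below counts the recorded slots in a whole chain of buffers:
  // every buffer except the head of the chain is full (kNumberOfElements
  // slots), while the head holds idx_ slots. For example (hypothetical), a
  // chain of three buffers whose head holds 10 slots has
  // 10 + 2 * 1021 = 2052 recorded slots.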
  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }
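
  // Note on AddTo above: when the current buffer is full, a fresh buffer is
  // prepended to the chain and the slot is recorded there. Under
  // FAIL_ON_OVERFLOW the whole chain is deallocated and false is returned
  // once the chain has grown to kChainLengthThreshold buffers, whereas
  // IGNORE_OVERFLOW lets the chain keep growing.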

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
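//
// As the private helpers below suggest, the candidate lists are threaded
// through the candidates themselves rather than kept in a side table: the
// next JSFunction candidate is stored in the function's next_function_link
// field, the next SharedFunctionInfo candidate in its code's gc_metadata
// field, and the next optimized code map holder in the kNextMapIndex entry
// of the holder's optimized code map.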
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, thus we have to rescan the page after
    // evacuation to discover and update all pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes was set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes was set to zero
  // then the whole given space is swept. It returns the size of the maximum
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently to the sweeper threads. It returns the
  // size of the maximum contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // If sweeper threads are not active, this method returns true. If this
  // becomes a latency issue we should be smarter here. Otherwise, it returns
  // true once the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const { return sequential_sweeping_; }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

 private:
  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool sequential_sweeping_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references that are only
  // considered in the final atomic marking pause, including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  void ClearDependentCode(DependentCode* dependent_code);
  void ClearDependentICList(Object* head);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);
  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                       int start, int end, int new_start);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. The active eden semispace is compacted
  //          by evacuation.
  //
  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }
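
  // Advance() moves to the next bitmap cell. Each cell covers 32 mark bits,
  // one per pointer-sized word of the chunk, so the cell base address is
  // stepped by 32 * kPointerSize bytes.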
  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};


class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);

}
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_