// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class SlotsBuffer;
class SlotsBufferAllocator;


class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }
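
  // Summary of the 2-bit mark encodings used above (first bit, second bit):
  //
  //   white      00 - unvisited
  //   grey       11 - discovered but not yet scanned
  //   black      10 - scanned; all of its slots have been visited
  //   impossible 01 - never a valid state
  //
  // During marking an object typically moves white -> grey -> black via
  // WhiteToGrey() and GreyToBlack() (illustrative summary; see the
  // per-pattern comments above).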

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
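
// Usage sketch (illustrative only, not part of the interface): a marker
// typically tests and advances the color of an object like this:
//
//   MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
//   if (Marking::IsWhite(mark_bit)) {
//     Marking::WhiteToGrey(mark_bit);
//     // ... push heap_object onto the marking deque for later scanning ...
//   }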


// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }
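
  // Illustrative note on the overflow protocol: an object that could not be
  // pushed is left grey in the heap; once the deque drains, the collector
  // clears the overflow flag and rescans the heap for such grey objects
  // (see MarkCompactCollector::RefillMarkingDeque below).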

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
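
// Sizing note (illustrative): array_ has a power-of-two capacity N with
// mask_ == N - 1, so the deque holds at most N - 1 objects; one slot is kept
// unused so that full (((top_ + 1) & mask_) == bottom_) and empty
// (top_ == bottom_) remain distinguishable.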


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
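// The candidate lists are intrusive (an observation from the helpers declared
// below, not a documented guarantee): each candidate is linked to the next
// through a field on the candidate itself via GetNextCandidate /
// SetNextCandidate / ClearNextCandidate, so no side table is needed while
// marking runs.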
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);
  inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
  static inline void SetNextCodeMap(SharedFunctionInfo* holder,
                                    SharedFunctionInfo* next_holder);
  static inline void ClearNextCodeMap(SharedFunctionInfo* holder);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space,
                     SlotsBuffer** evacuation_slots_buffer);

  void MigrateObjectTagged(HeapObject* dst, HeapObject* src, int size,
                           SlotsBuffer** evacuation_slots_buffer);
  void MigrateObjectMixed(HeapObject* dst, HeapObject* src, int size,
                          SlotsBuffer** evacuation_slots_buffer);
  void MigrateObjectRaw(HeapObject* dst, HeapObject* src, int size);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is
  // larger than 0, sweeping returns after a block of at least
  // required_freed_bytes has been freed. If it is zero, the whole given
  // space is swept. Returns the size of the largest contiguous freed memory
  // chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
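
  // For example (illustrative): SweepInParallel(space, 0) sweeps the entire
  // space, while SweepInParallel(space, KB) may return as soon as at least
  // 1 KB of contiguous memory has been freed.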

  // Sweeps the given page concurrently with the sweeper threads. Returns the
  // size of the largest contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  void SweepOrWaitUntilSweepingCompleted(Page* page);

  // Returns true if the sweeper threads are not active or if they are done
  // processing the pages. If this turns out to be a latency issue we should
  // be smarter here.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following four methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // when mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

 private:
  class CompactionTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool WillBeDeoptimized(Code* code);
  void EvictPopularEvacuationCandidate(Page* page);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  SlotsBufferAllocator* slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause, including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map, Map* dead_transition);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ProcessAndClearWeakCells();
  void AbortWeakCells();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space,
                                   SlotsBuffer** evacuation_slots_buffer);

  void AddEvacuationSlotsBufferSynchronized(
      SlotsBuffer* evacuation_slots_buffer);

  void EvacuatePages(CompactionSpaceCollection* compaction_spaces,
                     SlotsBuffer** evacuation_slots_buffer);

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks();

  void WaitUntilCompactionCompleted();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot,
                          SlotsBuffer** evacuation_slots_buffer);

  // Adds the code entry slot to the slots buffer.
  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
                                   SlotsBuffer** evacuation_slots_buffer);

  // Adds the slot of a moved code object.
  void RecordMigratedCodeObjectSlot(Address code_object,
                                    SlotsBuffer** evacuation_slots_buffer);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;

  // The evacuation_slots_buffers_ are used by the compaction threads.
  // When a compaction task finishes, it uses
  // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
  // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
  // lock.
  base::Mutex evacuation_slots_buffers_mutex_;
  List<SlotsBuffer*> evacuation_slots_buffers_;

  base::SmartPointer<FreeList> free_list_old_space_;
  base::SmartPointer<FreeList> free_list_code_space_;
  base::SmartPointer<FreeList> free_list_map_space_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  // True if parallel compaction is currently in progress.
  bool compaction_in_progress_;

  // Semaphore used to synchronize sweeper tasks.
  base::Semaphore pending_sweeper_tasks_semaphore_;

  // Semaphore used to synchronize compaction tasks.
  base::Semaphore pending_compaction_tasks_semaphore_;

  // Number of active compaction tasks (including main thread).
  intptr_t concurrent_compaction_tasks_active_;

  friend class Heap;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    // Each cell covers 32 mark bits, one per pointer-size word of the area.
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
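
// Usage sketch (illustrative): the scope flips the collector's evacuation
// flag for the duration of an evacuation phase, RAII-style:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... evacuate live objects ...
//   }  // collector->evacuation() is false again here.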


const char* AllocationSpaceName(AllocationSpace space);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_