// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
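
// A hypothetical callback conforming to IsAliveFunction might look like the
// sketch below (the name and body are illustrative, not part of this header):
// it reports liveness from the object's mark bit, always writes the object's
// size, and leaves the optional offset untouched.
//
//   static bool ExampleIsAlive(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();
//     return Marking::IsBlack(Marking::MarkBitFrom(obj));
//   }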

// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  static inline MarkBit MarkBitFrom(Address addr);

  static inline MarkBit MarkBitFrom(HeapObject* obj) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  static inline bool IsImpossible(MarkBit mark_bit) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  static inline bool IsBlack(MarkBit mark_bit) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  static inline bool IsWhite(MarkBit mark_bit) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  static inline bool IsGrey(MarkBit mark_bit) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  static inline void MarkBlack(MarkBit mark_bit) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  static inline void BlackToGrey(MarkBit markbit) {
    markbit.Next().Set();
  }

  static inline void WhiteToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }

  static inline void GreyToBlack(MarkBit markbit) {
    markbit.Next().Clear();
  }

  static inline void BlackToGrey(HeapObject* obj) {
    BlackToGrey(MarkBitFrom(obj));
  }

  static inline void AnyToGrey(MarkBit markbit) {
    markbit.Set();
    markbit.Next().Set();
  }

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    return IMPOSSIBLE_COLOR;
  }

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
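
// Illustrative note (not part of the original interface): Marking encodes an
// object's color in two adjacent mark bits -- white 00, grey 11, black 10,
// with 01 impossible. A typical marking step first turns a white object grey
// (discovered but not yet scanned) and later black (all fields visited):
//
//   MarkBit bit = Marking::MarkBitFrom(obj);
//   if (Marking::IsWhite(bit)) {
//     Marking::WhiteToGrey(bit);  // now push obj on the marking deque
//   }
//   ...
//   Marking::GreyToBlack(bit);    // once obj's pointer fields were visited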

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.

class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  inline void PushBlack(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline void PushGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  inline HeapObject* Pop() {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  inline void UnshiftGrey(HeapObject* object) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom
  // (mod mask + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
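
// Usage sketch (illustrative only, assuming the deque was Initialize()d over
// a suitable address range): black objects are pushed for later scanning;
// when the ring buffer is full the push degrades to an overflow that has to
// be recovered by rescanning the heap.
//
//   deque.PushBlack(obj);                 // may call SetOverflowed() if full
//   while (!deque.IsEmpty()) {
//     HeapObject* o = deque.Pop();
//     // ... visit o's pointer fields, pushing newly marked objects ...
//   }
//   if (deque.overflowed()) {
//     // refill from overflowed objects in the heap, then drain again
//   }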

class SlotsBuffer;


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//  - Untyped slots are expected to contain a tagged object pointer.
//    They are recorded by an address.
//  - Typed slots are expected to contain an encoded pointer to a heap
//    object where the way of encoding depends on the type of the slot.
//    They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES then it
// is the first element of a typed slot's pair.
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }
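
  // Worked example (illustrative): with kNumberOfElements == 1021, a chain of
  // three buffers whose head holds idx_ == 100 entries reports
  // SizeOfChain == 100 + (3 - 1) * 1021 == 2142, because every buffer except
  // the head is completely full.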

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    ObjectSlot slot,
                    AdditionMode mode) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 6;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
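
// Usage sketch (illustrative, not part of the original header): untyped slots
// go through the ObjectSlot overload of AddTo; a typed slot occupies two
// consecutive elements (the SlotType followed by the address), which is why
// callers check HasSpaceForTypedSlot() first. During iteration, an element
// smaller than NUMBER_OF_SLOT_TYPES is the type half of such a pair.
//
//   SlotsBuffer::AddTo(&allocator, &buffer_head, slot,
//                      SlotsBuffer::FAIL_ON_OVERFLOW);
//   SlotsBuffer::UpdateSlotsRecordedIn(heap, buffer_head,
//                                      false);  // no code slot filtering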

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces. Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding. There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* obj,
                                             int object_size);

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address. For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page. For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);
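
  // Hypothetical examples matching the typedefs above (names and bodies are
  // illustrative only, not declared anywhere in this codebase): an allocation
  // function returns the object's new location in its target space, and an
  // encoding function records the forwarding address while threading the
  // per-page 'offset' from one live object to the next.
  //
  //   static MaybeObject* ExampleAllocate(Heap* heap,
  //                                       HeapObject* obj,
  //                                       int object_size) {
  //     return heap->old_pointer_space()->AllocateRaw(object_size);
  //   }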

  // Set the global force_compaction flag; it must be called before Prepare
  // to take effect.
  inline void SetFlags(int flags);

  inline bool PreciseSweepingRequired() {
    return sweep_precisely_;
  }

  static void Initialize();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  bool StartCompaction();

  void AbortCompaction();
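
  // Typical call sequence as implied by the comments above (a sketch, not a
  // prescribed protocol): flags are set first, the collector is prepared, and
  // then the full collection runs.
  //
  //   collector->SetFlags(flags);     // before Prepare(), per the comment
  //   collector->Prepare(tracer);
  //   collector->CollectGarbage();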

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information. Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

  // Checks whether we are currently performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);

  // Sweep a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  static intptr_t SweepConservatively(PagedSpace* space, Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  void EvictEvacuationCandidate(Page* page) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates, thus we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_maps() { return encountered_weak_maps_; }
  inline void set_encountered_weak_maps(Object* weak_map) {
    encountered_weak_maps_ = weak_map;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal linear allocation state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class StaticMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  void ProcessNewlyMarkedObject(HeapObject* obj);

  // Creates back pointers for all map transitions, stores them in
  // the prototype field. The original prototype pointers are restored
  // in ClearNonLiveTransitions(). All JSObject maps
  // connected by map transitions have the same prototype object, which
  // is why we can use this field temporarily for back pointers.
  void CreateBackPointers();

  // Mark a Map and its DescriptorArray together, skipping transitions.
  void MarkMapContents(Map* map);
  void MarkDescriptorArray(DescriptorArray* descriptors);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the symbol table specially. References to symbols from the
  // symbol table are weak.
  void MarkSymbolTable();

  // Mark objects in object groups that have at least one object in the
  // group marked.
  void MarkObjectGroups();

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark all objects which are reachable due to host application
  // logic like object groups or implicit reference groups.
  void ProcessExternalMarking();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();
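
  // The three routines above cooperate roughly as follows (a sketch based on
  // the comments, not a verbatim copy of the implementation): drain the
  // deque, and while objects have overflowed into the heap, refill and drain
  // again until the overflow flag stays clear.
  //
  //   EmptyMarkingDeque();
  //   while (marking_deque_.overflowed()) {
  //     RefillMarkingDeque();
  //     EmptyMarkingDeque();
  //   }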

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveTransitions();

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // back after collection. This is either done during
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak maps encountered
  // so far. This might push new objects or even new weak maps onto the
  // marking stack.
  void ProcessWeakMaps();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakMaps();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. The active eden semispace is compacted
  //          by evacuating all of its live objects.
  //
  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_maps_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;
};


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_