1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
8 #include "src/allocation.h"
9 #include "src/atomic-utils.h"
10 #include "src/base/atomicops.h"
11 #include "src/base/bits.h"
12 #include "src/base/platform/mutex.h"
13 #include "src/flags.h"
14 #include "src/hashmap.h"
16 #include "src/objects.h"
17 #include "src/utils.h"
24 // -----------------------------------------------------------------------------
27 // A JS heap consists of a young generation, an old generation, and a large
28 // object space. The young generation is divided into two semispaces. A
29 // scavenger implements Cheney's copying algorithm. The old generation is
30 // separated into a map space and an old object space. The map space contains
31 // all (and only) map objects, the rest of old objects go into the old space.
32 // The old generation is collected by a mark-sweep-compact collector.
34 // The semispaces of the young generation are contiguous. The old and map
35 // spaces consist of a list of pages. A page has a page header and an object
36 // area.
38 // There is a separate large object space for objects larger than
39 // Page::kMaxRegularHeapObjectSize, so that they do not have to move during
40 // collection. The large object space is paged. Pages in large object space
41 // may be larger than the page size.
43 // A store-buffer based write barrier is used to keep track of intergenerational
44 // references. See heap/store-buffer.h.
46 // During scavenges and mark-sweep collections we sometimes (after a store
47 // buffer overflow) iterate intergenerational pointers without decoding heap
48 // object maps, so if the page belongs to old space or large object space
49 // it is essential to guarantee that the page does not contain any
50 // garbage pointers to new space: every pointer aligned word which satisfies
51 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
52 // new space. Thus objects in old space and large object spaces should have a
53 // special layout (e.g. no bare integer fields). This requirement does not
54 // apply to map space, which is iterated in a special fashion. However, we still
55 // require pointer fields of dead maps to be cleaned.
57 // To enable lazy cleaning of old space pages we can mark chunks of the page
58 // as being garbage. Garbage sections are marked with a special map. These
59 // sections are skipped when scanning the page, even if we are otherwise
60 // scanning without regard for object boundaries. Garbage sections are chained
61 // together to form a free list after a GC. Garbage sections created outside
62 // of GCs by object truncation etc. may not be in the free list chain. Very
63 // small free spaces are ignored; they need only be cleaned of bogus pointers
64 // into new space.
66 // Each page may have up to one special garbage section. The start of this
67 // section is denoted by the top field in the space. The end of the section
68 // is denoted by the limit field in the space. This special garbage section
69 // is not marked with a free space map in the data. The point of this section
70 // is to enable linear allocation without having to constantly update the byte
71 // array every time the top field is updated and a new object is created. The
72 // special garbage section is not in the chain of garbage sections.
74 // Since the top and limit fields are in the space, not the page, only one page
75 // has a special garbage section, and if the top and limit are equal then there
76 // is no special garbage section.
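//
// As a rough sketch (not the actual allocation code, which lives in the space
// implementations), bump-pointer allocation against the top and limit fields
// conceptually looks like this:
//
//   Address top = allocation_info->top();
//   if (top + size_in_bytes <= allocation_info->limit()) {
//     allocation_info->set_top(top + size_in_bytes);
//     return HeapObject::FromAddress(top);  // fast path
//   }
//   // Otherwise allocation falls back to the free list (and eventually GC).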
78 // Some assertion macros used in debug mode.
80 #define DCHECK_PAGE_ALIGNED(address) \
81 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
83 #define DCHECK_OBJECT_ALIGNED(address) \
84 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
86 #define DCHECK_OBJECT_SIZE(size) \
87 DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
89 #define DCHECK_PAGE_OFFSET(offset) \
90 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
92 #define DCHECK_MAP_PAGE_INDEX(index) \
93 DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
96 class CompactionSpace;
98 class MemoryAllocator;
105 typedef uint32_t CellType;
107 inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
110 bool operator==(const MarkBit& other) {
111 return cell_ == other.cell_ && mask_ == other.mask_;
116 inline CellType* cell() { return cell_; }
117 inline CellType mask() { return mask_; }
119 inline MarkBit Next() {
120 CellType new_mask = mask_ << 1;
121 if (new_mask == 0) {
122 return MarkBit(cell_ + 1, 1);
123 }
124 return MarkBit(cell_, new_mask);
128 inline void Set() { *cell_ |= mask_; }
129 inline bool Get() { return (*cell_ & mask_) != 0; }
130 inline void Clear() { *cell_ &= ~mask_; }
135 friend class Marking;
139 // Bitmap is a sequence of cells, each containing a fixed number of bits.
142 static const uint32_t kBitsPerCell = 32;
143 static const uint32_t kBitsPerCellLog2 = 5;
144 static const uint32_t kBitIndexMask = kBitsPerCell - 1;
145 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
146 static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
148 static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
150 static const size_t kSize =
151 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
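// As a worked example, assuming 1 MB pages (kPageSizeBits == 20) and a 64-bit
// build (kPointerSizeLog2 == 3): kLength == (1 << 20) >> 3 == 131072 mark
// bits per page, and kSize == (1 << 20) >> (3 + 3) == 16384 bytes of bitmap,
// i.e. one bit per pointer-aligned word of the page.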
154 static int CellsForLength(int length) {
155 return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
158 int CellsCount() { return CellsForLength(kLength); }
160 static int SizeFor(int cells_count) {
161 return sizeof(MarkBit::CellType) * cells_count;
164 INLINE(static uint32_t IndexToCell(uint32_t index)) {
165 return index >> kBitsPerCellLog2;
168 INLINE(static uint32_t CellToIndex(uint32_t index)) {
169 return index << kBitsPerCellLog2;
172 INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
173 return (index + kBitIndexMask) & ~kBitIndexMask;
176 INLINE(MarkBit::CellType* cells()) {
177 return reinterpret_cast<MarkBit::CellType*>(this);
180 INLINE(Address address()) { return reinterpret_cast<Address>(this); }
182 INLINE(static Bitmap* FromAddress(Address addr)) {
183 return reinterpret_cast<Bitmap*>(addr);
186 inline MarkBit MarkBitFromIndex(uint32_t index) {
187 MarkBit::CellType mask = 1 << (index & kBitIndexMask);
188 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
189 return MarkBit(cell, mask);
192 static inline void Clear(MemoryChunk* chunk);
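// Worked example (hypothetical index, for illustration only): markbit index 37
// lives in cell 37 >> kBitsPerCellLog2 == 1 with mask
// 1 << (37 & kBitIndexMask) == 1 << 5, so
//
//   MarkBit bit = bitmap->MarkBitFromIndex(37);
//   bit.Set();  // sets bit 5 of the second cell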
194 static void PrintWord(uint32_t word, uint32_t himask = 0) {
195 for (uint32_t mask = 1; mask != 0; mask <<= 1) {
196 if ((mask & himask) != 0) PrintF("[");
197 PrintF((mask & word) ? "1" : "0");
198 if ((mask & himask) != 0) PrintF("]");
204 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
206 void Print(uint32_t pos, uint32_t cell) {
207 if (cell == seq_type) {
227 if (seq_length > 0) {
228 PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
229 seq_length * kBitsPerCell);
234 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
244 for (int i = 0; i < CellsCount(); i++) {
245 printer.Print(i, cells()[i]);
252 for (int i = 0; i < CellsCount(); i++) {
253 if (cells()[i] != 0) {
265 // MemoryChunk represents a memory region owned by a specific space.
266 // It is divided into the header and the body. Chunk start is always
267 // 1MB aligned. Start of the body is aligned so it can accommodate
268 // any heap object.
271 enum MemoryChunkFlags {
274 POINTERS_TO_HERE_ARE_INTERESTING,
275 POINTERS_FROM_HERE_ARE_INTERESTING,
277 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE.
278 IN_TO_SPACE, // All pages in new space have one of these two set.
279 NEW_SPACE_BELOW_AGE_MARK,
280 EVACUATION_CANDIDATE,
281 RESCAN_ON_EVACUATION,
282 NEVER_EVACUATE, // May contain immortal immutables.
283 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC.
285 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
286 // otherwise marking bits are still intact.
289 // Large objects can have a progress bar in their page header. These objects
290 // are scanned in increments and will be kept black while being scanned.
291 // Even if the mutator writes to them they will be kept black and a white
292 // to grey transition is performed in the value.
295 // This flag is intended to be used for testing. Works only when both
296 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
297 // are set. It forces the page to become an evacuation candidate at next
298 // candidates selection cycle.
299 FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
301 // The memory chunk is already logically freed; however, the actual freeing
302 // still has to be performed.
305 // Last flag, keep at bottom.
306 NUM_MEMORY_CHUNK_FLAGS
309 // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
310 // |kCompactingInProgress|: Parallel compaction is currently in progress.
311 // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
313 // |kCompactingAborted|: Parallel compaction has been aborted, which should
314 // for now only happen in OOM scenarios.
315 enum ParallelCompactingState {
317 kCompactingInProgress,
322 // |kSweepingDone|: The page state when sweeping is complete or sweeping must
323 // not be performed on that page.
324 // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
325 // not touch the page memory anymore.
326 // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
327 // |kSweepingPending|: This page is ready for parallel sweeping.
328 enum ParallelSweepingState {
335 // Every n write barrier invocations we go to runtime even though
336 // we could have handled it in generated code. This lets us check
337 // whether we have hit the limit and should do some more marking.
338 static const int kWriteBarrierCounterGranularity = 500;
340 static const int kPointersToHereAreInterestingMask =
341 1 << POINTERS_TO_HERE_ARE_INTERESTING;
343 static const int kPointersFromHereAreInterestingMask =
344 1 << POINTERS_FROM_HERE_ARE_INTERESTING;
346 static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
348 static const int kSkipEvacuationSlotsRecordingMask =
349 (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
350 (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
352 static const intptr_t kAlignment =
353 (static_cast<uintptr_t>(1) << kPageSizeBits);
355 static const intptr_t kAlignmentMask = kAlignment - 1;
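// Because every chunk is kAlignment (1 MB) aligned, the chunk owning an
// arbitrary interior address can be recovered by masking off the low bits:
//
//   MemoryChunk* chunk =
//       reinterpret_cast<MemoryChunk*>(OffsetFrom(addr) & ~kAlignmentMask);
//
// which is exactly what MemoryChunk::FromAddress() below does.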
357 static const intptr_t kSizeOffset = 0;
359 static const intptr_t kLiveBytesOffset =
360 kSizeOffset + kPointerSize // size_t size
361 + kIntptrSize // intptr_t flags_
362 + kPointerSize // Address area_start_
363 + kPointerSize // Address area_end_
364 + 2 * kPointerSize // base::VirtualMemory reservation_
365 + kPointerSize // Address owner_
366 + kPointerSize // Heap* heap_
367 + kIntSize; // int store_buffer_counter_
369 static const size_t kSlotsBufferOffset =
370 kLiveBytesOffset + kIntSize; // int live_byte_count_
372 static const size_t kWriteBarrierCounterOffset =
373 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_;
374 + kPointerSize; // SkipList* skip_list_;
376 static const size_t kMinHeaderSize =
377 kWriteBarrierCounterOffset +
378 kIntptrSize // intptr_t write_barrier_counter_
379 + kIntSize // int progress_bar_
380 + kPointerSize // AtomicValue high_water_mark_
381 + kPointerSize // base::Mutex* mutex_
382 + kPointerSize // base::AtomicWord parallel_sweeping_
383 + kPointerSize // AtomicValue parallel_compaction_
384 + 5 * kPointerSize // AtomicNumber free-list statistics
385 + kPointerSize // AtomicValue next_chunk_
386 + kPointerSize; // AtomicValue prev_chunk_
388 // We add some more space to the computed header size to account for missing
389 // alignment requirements in our computation.
390 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
391 static const size_t kHeaderSize = kMinHeaderSize + kIntSize;
393 static const int kBodyOffset =
394 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
396 // The start offset of the object area in a page. Aligned to both maps and
397 // code alignment to be suitable for both. Also aligned to 32 words because
398 // the marking bitmap is arranged in 32 bit chunks.
399 static const int kObjectStartAlignment = 32 * kPointerSize;
400 static const int kObjectStartOffset =
402 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
404 static const int kFlagsOffset = kPointerSize;
406 static void IncrementLiveBytesFromMutator(HeapObject* object, int by);
408 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
409 static MemoryChunk* FromAddress(Address a) {
410 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
413 static const MemoryChunk* FromAddress(const byte* a) {
414 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
415 ~kAlignmentMask);
418 static void IncrementLiveBytesFromGC(HeapObject* object, int by) {
419 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
422 // Only works for addresses in pointer spaces, not data or code spaces.
423 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
425 static inline uint32_t FastAddressToMarkbitIndex(Address addr) {
426 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
427 return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
430 static inline void UpdateHighWaterMark(Address mark) {
431 if (mark == nullptr) return;
432 // Need to subtract one from the mark because when a chunk is full the
433 // top points to the next address after the chunk, which effectively belongs
434 // to another chunk. See the comment to Page::FromAllocationTop.
435 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
436 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
437 intptr_t old_mark = 0;
439 old_mark = chunk->high_water_mark_.Value();
440 } while ((new_mark > old_mark) &&
441 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
444 Address address() { return reinterpret_cast<Address>(this); }
446 bool is_valid() { return address() != NULL; }
448 MemoryChunk* next_chunk() { return next_chunk_.Value(); }
450 MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
452 void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
454 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
456 Space* owner() const {
457 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
459 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
466 void set_owner(Space* space) {
467 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
468 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
469 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
473 base::VirtualMemory* reserved_memory() { return &reservation_; }
475 void set_reserved_memory(base::VirtualMemory* reservation) {
476 DCHECK_NOT_NULL(reservation);
477 reservation_.TakeControl(reservation);
480 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
481 void initialize_scan_on_scavenge(bool scan) {
482 if (scan) {
483 SetFlag(SCAN_ON_SCAVENGE);
484 } else {
485 ClearFlag(SCAN_ON_SCAVENGE);
486 }
488 inline void set_scan_on_scavenge(bool scan);
490 int store_buffer_counter() { return store_buffer_counter_; }
491 void set_store_buffer_counter(int counter) {
492 store_buffer_counter_ = counter;
495 bool Contains(Address addr) {
496 return addr >= area_start() && addr < area_end();
499 // Checks whether addr can be a limit of addresses in this page.
500 // It's a limit if it's in the page, or if it's just after the
501 // last byte of the page.
502 bool ContainsLimit(Address addr) {
503 return addr >= area_start() && addr <= area_end();
506 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
508 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
510 void SetFlagTo(int flag, bool value) {
518 bool IsFlagSet(int flag) {
519 return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
522 // Set or clear multiple flags at a time. The flags in the mask
523 // are set to the value in "flags", the rest retain the current value
525 void SetFlags(intptr_t flags, intptr_t mask) {
526 flags_ = (flags_ & ~mask) | (flags & mask);
529 // Return all current flags.
530 intptr_t GetFlags() { return flags_; }
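// For example, a page that moves from the from-space to the to-space of new
// space could (hypothetically) flip both flags in a single call:
//
//   chunk->SetFlags(1 << IN_TO_SPACE,
//                   (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE));
//
// which sets IN_TO_SPACE and clears IN_FROM_SPACE while leaving all other
// flags unchanged.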
532 AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
533 return parallel_sweeping_;
536 AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
537 return parallel_compaction_;
540 bool TryLock() { return mutex_->TryLock(); }
542 base::Mutex* mutex() { return mutex_; }
544 // WaitUntilSweepingCompleted only works when concurrent sweeping is in
545 // progress. In particular, when we know that right before this call a
546 // sweeper thread was sweeping this page.
547 void WaitUntilSweepingCompleted() {
550 DCHECK(SweepingCompleted());
553 bool SweepingCompleted() {
554 return parallel_sweeping_state().Value() <= kSweepingFinalize;
557 // Manage live byte count (count of bytes known to be live,
558 // because they are marked black).
559 void ResetLiveBytes() {
560 if (FLAG_gc_verbose) {
561 PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
564 live_byte_count_ = 0;
566 void IncrementLiveBytes(int by) {
567 if (FLAG_gc_verbose) {
568 PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
569 live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
570 live_byte_count_ + by);
572 live_byte_count_ += by;
573 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
575 int LiveBytes() {
576 DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
577 return live_byte_count_;
580 int write_barrier_counter() {
581 return static_cast<int>(write_barrier_counter_);
584 void set_write_barrier_counter(int counter) {
585 write_barrier_counter_ = counter;
589 DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
590 return progress_bar_;
593 void set_progress_bar(int progress_bar) {
594 DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
595 progress_bar_ = progress_bar;
598 void ResetProgressBar() {
599 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
601 ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
605 bool IsLeftOfProgressBar(Object** slot) {
606 Address slot_address = reinterpret_cast<Address>(slot);
607 DCHECK(slot_address > this->address());
608 return (slot_address - (this->address() + kObjectStartOffset)) <
612 size_t size() const { return size_; }
614 void set_size(size_t size) { size_ = size; }
616 void SetArea(Address area_start, Address area_end) {
617 area_start_ = area_start;
618 area_end_ = area_end;
621 Executability executable() {
622 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
625 bool InNewSpace() {
626 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
629 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
631 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
635 inline Bitmap* markbits() {
636 return Bitmap::FromAddress(address() + kHeaderSize);
639 void PrintMarkbits() { markbits()->Print(); }
641 inline uint32_t AddressToMarkbitIndex(Address addr) {
642 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
645 inline Address MarkbitIndexToAddress(uint32_t index) {
646 return this->address() + (index << kPointerSizeLog2);
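// Sketch of the address <-> markbit mapping (one bit per pointer-sized word):
// for any pointer-aligned address A on this chunk,
// MarkbitIndexToAddress(AddressToMarkbitIndex(A)) == A, so marking an object
// amounts to:
//
//   uint32_t index = chunk->AddressToMarkbitIndex(obj->address());
//   chunk->markbits()->MarkBitFromIndex(index).Set();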
649 void InsertAfter(MemoryChunk* other);
652 inline Heap* heap() const { return heap_; }
654 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
656 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
658 bool IsEvacuationCandidate() {
659 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
660 return IsFlagSet(EVACUATION_CANDIDATE);
663 bool ShouldSkipEvacuationSlotRecording() {
664 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
667 inline SkipList* skip_list() { return skip_list_; }
669 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
671 inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
673 inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
675 void MarkEvacuationCandidate() {
676 DCHECK(!IsFlagSet(NEVER_EVACUATE));
677 DCHECK(slots_buffer_ == NULL);
678 SetFlag(EVACUATION_CANDIDATE);
681 void ClearEvacuationCandidate() {
682 DCHECK(slots_buffer_ == NULL);
683 ClearFlag(EVACUATION_CANDIDATE);
686 Address area_start() { return area_start_; }
687 Address area_end() { return area_end_; }
688 int area_size() { return static_cast<int>(area_end() - area_start()); }
689 bool CommitArea(size_t requested);
691 // Approximate amount of physical memory committed for this chunk.
692 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
694 // Should be called when memory chunk is about to be freed.
695 void ReleaseAllocatedMemory();
698 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
699 Address area_start, Address area_end,
700 Executability executable, Space* owner);
705 // Start and end of allocatable memory on this chunk.
709 // If the chunk needs to remember its memory reservation, it is stored here.
710 base::VirtualMemory reservation_;
711 // The identity of the owning space. This is tagged as a failure pointer, but
712 // no failure can be in an object, so this can be distinguished from any entry
716 // Used by the store buffer to keep track of which pages to mark scan-on-
717 // scavenge.
718 int store_buffer_counter_;
719 // Count of bytes marked black on page.
720 int live_byte_count_;
721 SlotsBuffer* slots_buffer_;
722 SkipList* skip_list_;
723 intptr_t write_barrier_counter_;
724 // Used by the incremental marker to keep track of the scanning progress in
725 // large objects that have a progress bar and are scanned in increments.
727 // Assuming the initial allocation on a page is sequential,
728 // count highest number of bytes ever allocated on the page.
729 AtomicValue<intptr_t> high_water_mark_;
732 AtomicValue<ParallelSweepingState> parallel_sweeping_;
733 AtomicValue<ParallelCompactingState> parallel_compaction_;
735 // PagedSpace free-list statistics.
736 AtomicNumber<intptr_t> available_in_small_free_list_;
737 AtomicNumber<intptr_t> available_in_medium_free_list_;
738 AtomicNumber<intptr_t> available_in_large_free_list_;
739 AtomicNumber<intptr_t> available_in_huge_free_list_;
740 AtomicNumber<intptr_t> non_available_small_blocks_;
742 // next_chunk_ holds a pointer of type MemoryChunk
743 AtomicValue<MemoryChunk*> next_chunk_;
744 // prev_chunk_ holds a pointer of type MemoryChunk
745 AtomicValue<MemoryChunk*> prev_chunk_;
748 void InitializeReservedMemory() { reservation_.Reset(); }
750 friend class MemoryAllocator;
751 friend class MemoryChunkValidator;
755 // -----------------------------------------------------------------------------
756 // A page is a memory chunk of size 1MB. Large object pages may be larger.
758 // The only way to get a page pointer is by calling factory methods:
759 // Page* p = Page::FromAddress(addr); or
760 // Page* p = Page::FromAllocationTop(top);
761 class Page : public MemoryChunk {
763 // Returns the page containing a given address. The address ranges
764 // from [page_addr .. page_addr + kPageSize[
765 // This only works if the object is in fact in a page. See also MemoryChunk::
766 // FromAddress() and FromAnyAddress().
767 INLINE(static Page* FromAddress(Address a)) {
768 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
771 // Returns the page containing an allocation top. Because an allocation
772 // top address can be the upper bound of the page, we need to subtract
773 // it with kPointerSize first. The address ranges from
774 // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
775 INLINE(static Page* FromAllocationTop(Address top)) {
776 Page* p = FromAddress(top - kPointerSize);
780 // Returns the next page in the chain of pages owned by a space.
781 inline Page* next_page() {
782 DCHECK(next_chunk()->owner() == owner());
783 return static_cast<Page*>(next_chunk());
785 inline Page* prev_page() {
786 DCHECK(prev_chunk()->owner() == owner());
787 return static_cast<Page*>(prev_chunk());
789 inline void set_next_page(Page* page);
790 inline void set_prev_page(Page* page);
792 // Checks whether an address is page aligned.
793 static bool IsAlignedToPageSize(Address a) {
794 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
797 // Returns the offset of a given address to this page.
798 INLINE(int Offset(Address a)) {
799 int offset = static_cast<int>(a - address());
803 // Returns the address for a given offset in this page.
804 Address OffsetToAddress(int offset) {
805 DCHECK_PAGE_OFFSET(offset);
806 return address() + offset;
809 // ---------------------------------------------------------------------
811 // Page size in bytes. This must be a multiple of the OS page size.
812 static const int kPageSize = 1 << kPageSizeBits;
814 // Maximum object size that fits in a page. Objects larger than that size
815 // are allocated in large object space and are never moved in memory. This
816 // also applies to new space allocation, since objects are never migrated
817 // from new space to large object space. Takes double alignment into account.
818 static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
821 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
823 inline void ClearGCFields();
825 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
826 Executability executable, PagedSpace* owner);
828 void InitializeAsAnchor(PagedSpace* owner);
830 bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
831 void SetWasSwept() { SetFlag(WAS_SWEPT); }
832 void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
834 void ResetFreeListStatistics();
836 int LiveBytesFromFreeList() {
837 return static_cast<int>(
838 area_size() - non_available_small_blocks() -
839 available_in_small_free_list() - available_in_medium_free_list() -
840 available_in_large_free_list() - available_in_huge_free_list());
843 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
844 type name() { return name##_.Value(); } \
845 void set_##name(type name) { name##_.SetValue(name); } \
846 void add_##name(type name) { name##_.Increment(name); }
848 FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
849 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
850 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
851 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
852 FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
854 #undef FRAGMENTATION_STATS_ACCESSORS
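// For reference, FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
// expands roughly to (parameter name shortened for readability):
//
//   intptr_t non_available_small_blocks() { return non_available_small_blocks_.Value(); }
//   void set_non_available_small_blocks(intptr_t v) { non_available_small_blocks_.SetValue(v); }
//   void add_non_available_small_blocks(intptr_t v) { non_available_small_blocks_.Increment(v); }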
860 friend class MemoryAllocator;
864 class LargePage : public MemoryChunk {
866 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
868 inline LargePage* next_page() {
869 return static_cast<LargePage*>(next_chunk());
872 inline void set_next_page(LargePage* page) { set_next_chunk(page); }
875 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
877 friend class MemoryAllocator;
881 // ----------------------------------------------------------------------------
882 // Space is the abstract superclass for all allocation spaces.
883 class Space : public Malloced {
885 Space(Heap* heap, AllocationSpace id, Executability executable)
886 : heap_(heap), id_(id), executable_(executable) {}
890 Heap* heap() const { return heap_; }
892 // Does the space need executable memory?
893 Executability executable() { return executable_; }
895 // Identity used in error reporting.
896 AllocationSpace identity() { return id_; }
898 // Returns allocated size.
899 virtual intptr_t Size() = 0;
901 // Returns size of objects. Can differ from the allocated size
902 // (e.g. see LargeObjectSpace).
903 virtual intptr_t SizeOfObjects() { return Size(); }
905 // Return the total amount of memory committed for this space.
906 virtual intptr_t CommittedMemory() = 0;
908 // Approximate amount of physical memory committed for this space.
909 virtual size_t CommittedPhysicalMemory() = 0;
911 // Return the available bytes without growing.
912 virtual intptr_t Available() = 0;
914 virtual int RoundSizeDownToObjectAlignment(int size) {
915 if (id_ == CODE_SPACE) {
916 return RoundDown(size, kCodeAlignment);
918 return RoundDown(size, kPointerSize);
923 virtual void Print() = 0;
929 Executability executable_;
933 class MemoryChunkValidator {
934 // Computed offsets should match the compiler generated ones.
935 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
936 STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
937 offsetof(MemoryChunk, live_byte_count_));
938 STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
939 offsetof(MemoryChunk, slots_buffer_));
940 STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
941 offsetof(MemoryChunk, write_barrier_counter_));
943 // Validate our estimates on the header size.
944 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
945 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
946 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
950 // ----------------------------------------------------------------------------
951 // All heap objects containing executable code (code objects) must be allocated
952 // from a 2 GB range of memory, so that they can call each other using 32-bit
953 // displacements. This happens automatically on 32-bit platforms, where 32-bit
954 // displacements cover the entire 4GB virtual address space. On 64-bit
955 // platforms, we support this using the CodeRange object, which reserves and
956 // manages a range of virtual memory.
959 explicit CodeRange(Isolate* isolate);
960 ~CodeRange() { TearDown(); }
962 // Reserves a range of virtual memory, but does not commit any of it.
963 // Can only be called once, at heap initialization time.
964 // Returns false on failure.
965 bool SetUp(size_t requested_size);
967 bool valid() { return code_range_ != NULL; }
970 return static_cast<Address>(code_range_->address());
974 return code_range_->size();
976 bool contains(Address address) {
977 if (!valid()) return false;
978 Address start = static_cast<Address>(code_range_->address());
979 return start <= address && address < start + code_range_->size();
982 // Allocates a chunk of memory from the large-object portion of
983 // the code range. On platforms with no separate code range, should
985 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
986 const size_t commit_size,
988 bool CommitRawMemory(Address start, size_t length);
989 bool UncommitRawMemory(Address start, size_t length);
990 void FreeRawMemory(Address buf, size_t length);
993 // Frees the range of virtual memory, and frees the data structures used to
999 // The reserved range of virtual memory that all code objects are put in.
1000 base::VirtualMemory* code_range_;
1001 // Plain old data class, just a struct plus a constructor.
1004 FreeBlock() : start(0), size(0) {}
1005 FreeBlock(Address start_arg, size_t size_arg)
1006 : start(start_arg), size(size_arg) {
1007 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1008 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1010 FreeBlock(void* start_arg, size_t size_arg)
1011 : start(static_cast<Address>(start_arg)), size(size_arg) {
1012 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1013 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1020 // The global mutex guards free_list_ and allocation_list_ as GC threads may
1021 // access both lists concurrently with the main thread.
1022 base::Mutex code_range_mutex_;
1024 // Freed blocks of memory are added to the free list. When the allocation
1025 // list is exhausted, the free list is sorted and merged to make the new
1026 // allocation list.
1027 List<FreeBlock> free_list_;
1029 // Memory is allocated from the free blocks on the allocation list.
1030 // The block at current_allocation_block_index_ is the current block.
1031 List<FreeBlock> allocation_list_;
1032 int current_allocation_block_index_;
1034 // Finds a block on the allocation list that contains at least the
1035 // requested amount of memory. If none is found, sorts and merges
1036 // the existing free memory blocks, and searches again.
1037 // If none can be found, returns false.
1038 bool GetNextAllocationBlock(size_t requested);
1039 // Compares the start addresses of two free blocks.
1040 static int CompareFreeBlockAddress(const FreeBlock* left,
1041 const FreeBlock* right);
1042 bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1043 void ReleaseBlock(const FreeBlock* block);
1045 DISALLOW_COPY_AND_ASSIGN(CodeRange);
1051 SkipList() { Clear(); }
1054 for (int idx = 0; idx < kSize; idx++) {
1055 starts_[idx] = reinterpret_cast<Address>(-1);
1059 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1061 void AddObject(Address addr, int size) {
1062 int start_region = RegionNumber(addr);
1063 int end_region = RegionNumber(addr + size - kPointerSize);
1064 for (int idx = start_region; idx <= end_region; idx++) {
1065 if (starts_[idx] > addr) starts_[idx] = addr;
1069 static inline int RegionNumber(Address addr) {
1070 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1073 static void Update(Address addr, int size) {
1074 Page* page = Page::FromAddress(addr);
1075 SkipList* list = page->skip_list();
1077 list = new SkipList();
1078 page->set_skip_list(list);
1081 list->AddObject(addr, size);
1085 static const int kRegionSizeLog2 = 13;
1086 static const int kRegionSize = 1 << kRegionSizeLog2;
1087 static const int kSize = Page::kPageSize / kRegionSize;
1089 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1091 Address starts_[kSize];
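// Worked example (numbers follow from the constants above): with
// kRegionSizeLog2 == 13 each region covers 8 KB, so a 1 MB page has
// kSize == 128 regions. An object records its start address in every region
// it covers, so for a slot inside a registered object StartFor() returns an
// object start at or before that object, i.e. a safe point to begin scanning:
//
//   SkipList::Update(object_address, object_size);            // at allocation
//   Address scan_start = page->skip_list()->StartFor(slot_address);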
1095 // ----------------------------------------------------------------------------
1096 // A space acquires chunks of memory from the operating system. The memory
1097 // allocator allocates and deallocates pages for the paged heap spaces and large
1098 // pages for large object space.
1100 // Each space has to manage its own pages.
1102 class MemoryAllocator {
1104 explicit MemoryAllocator(Isolate* isolate);
1106 // Initializes its internal bookkeeping structures.
1107 // Max capacity of the total space and executable memory limit.
1108 bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1112 Page* AllocatePage(intptr_t size, PagedSpace* owner,
1113 Executability executable);
1115 LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
1116 Executability executable);
1118 // PreFree logically frees the object, i.e., it takes care of the size
1119 // bookkeeping and calls the allocation callback.
1120 void PreFreeMemory(MemoryChunk* chunk);
1122 // FreeMemory can be called concurrently when PreFree was executed before.
1123 void PerformFreeMemory(MemoryChunk* chunk);
1125 // Free is a wrapper method, which calls PreFree and PerformFreeMemory
1127 void Free(MemoryChunk* chunk);
1129 // Returns allocated spaces in bytes.
1130 intptr_t Size() { return size_.Value(); }
1132 // Returns allocated executable spaces in bytes.
1133 intptr_t SizeExecutable() { return size_executable_.Value(); }
1135 // Returns the maximum available bytes of heaps.
1136 intptr_t Available() {
1137 intptr_t size = Size();
1138 return capacity_ < size ? 0 : capacity_ - size;
1141 // Returns the maximum available executable bytes of heaps.
1142 intptr_t AvailableExecutable() {
1143 intptr_t executable_size = SizeExecutable();
1144 if (capacity_executable_ < executable_size) return 0;
1145 return capacity_executable_ - executable_size;
1148 // Returns maximum available bytes that the old space can have.
1149 intptr_t MaxAvailable() {
1150 return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1153 // Returns an indication of whether a pointer is in a space that has
1154 // been allocated by this MemoryAllocator.
1155 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1156 return address < lowest_ever_allocated_.Value() ||
1157 address >= highest_ever_allocated_.Value();
1161 // Reports statistic info of the space.
1162 void ReportStatistics();
1165 // Returns a MemoryChunk in which the memory region from commit_area_size to
1166 // reserve_area_size of the chunk area is reserved but not committed; it
1167 // could be committed later by calling MemoryChunk::CommitArea.
1168 MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1169 intptr_t commit_area_size,
1170 Executability executable, Space* space);
1172 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1173 base::VirtualMemory* controller);
1174 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1175 size_t alignment, Executability executable,
1176 base::VirtualMemory* controller);
1178 bool CommitMemory(Address addr, size_t size, Executability executable);
1180 void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
1181 Executability executable);
1182 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1183 void FreeMemory(Address addr, size_t size, Executability executable);
1185 // Commit a contiguous block of memory from the initial chunk. Assumes that
1186 // the address is not NULL, the size is greater than zero, and that the
1187 // block is contained in the initial chunk. Returns true if it succeeded
1188 // and false otherwise.
1189 bool CommitBlock(Address start, size_t size, Executability executable);
1191 // Uncommit a contiguous block of memory [start..(start+size)[.
1192 // start is not NULL, the size is greater than zero, and the
1193 // block is contained in the initial chunk. Returns true if it succeeded
1194 // and false otherwise.
1195 bool UncommitBlock(Address start, size_t size);
1197 // Zaps a contiguous block of memory [start..(start+size)[ thus
1198 // filling it up with a recognizable non-NULL bit pattern.
1199 void ZapBlock(Address start, size_t size);
1201 void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
1204 void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1205 ObjectSpace space, AllocationAction action);
1207 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
1209 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
1211 static int CodePageGuardStartOffset();
1213 static int CodePageGuardSize();
1215 static int CodePageAreaStartOffset();
1217 static int CodePageAreaEndOffset();
1219 static int CodePageAreaSize() {
1220 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1223 static int PageAreaSize(AllocationSpace space) {
1224 DCHECK_NE(LO_SPACE, space);
1225 return (space == CODE_SPACE) ? CodePageAreaSize()
1226 : Page::kMaxRegularHeapObjectSize;
1229 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1230 Address start, size_t commit_size,
1231 size_t reserved_size);
1236 // Maximum space size in bytes.
1238 // Maximum subset of capacity_ that can be executable
1239 intptr_t capacity_executable_;
1241 // Allocated space size in bytes.
1242 AtomicNumber<intptr_t> size_;
1243 // Allocated executable space size in bytes.
1244 AtomicNumber<intptr_t> size_executable_;
1246 // We keep the lowest and highest addresses allocated as a quick way
1247 // of determining that pointers are outside the heap. The estimate is
1248 // conservative, i.e. not all addresses in 'allocated' space are allocated
1249 // to our heap. The range is [lowest, highest[, inclusive on the low end
1250 // and exclusive on the high end.
1251 AtomicValue<void*> lowest_ever_allocated_;
1252 AtomicValue<void*> highest_ever_allocated_;
1254 struct MemoryAllocationCallbackRegistration {
1255 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1257 AllocationAction action)
1258 : callback(callback), space(space), action(action) {}
1259 MemoryAllocationCallback callback;
1261 AllocationAction action;
1264 // A list of callbacks that are triggered when memory is allocated or freed.
1265 List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
1267 // Initializes pages in a chunk. Returns the first page address.
1268 // This function and GetChunkId() are provided for the mark-compact
1269 // collector to rebuild page headers in the from space, which is
1270 // used as a marking stack and its page headers are destroyed.
1271 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1274 void UpdateAllocatedSpaceLimits(void* low, void* high) {
1275 // The use of atomic primitives does not guarantee correctness (wrt.
1276 // desired semantics) by default. The loop here ensures that we update the
1277 // values only if they did not change in between.
1278 void* ptr = nullptr;
1280 ptr = lowest_ever_allocated_.Value();
1281 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1283 ptr = highest_ever_allocated_.Value();
1284 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1287 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1291 // -----------------------------------------------------------------------------
1292 // Interface for heap object iterator to be implemented by all object space
1293 // object iterators.
1295 // NOTE: The space-specific object iterators also implement their own next()
1296 // method, which is used to avoid virtual function calls when
1297 // iterating a specific space.
1299 class ObjectIterator : public Malloced {
1301 virtual ~ObjectIterator() {}
1303 virtual HeapObject* next_object() = 0;
1307 // -----------------------------------------------------------------------------
1308 // Heap object iterator in new/old/map spaces.
1310 // A HeapObjectIterator iterates objects from the bottom of the given space
1311 // to its top or from the bottom of the given page to its top.
1313 // If objects are allocated in the page during iteration the iterator may
1314 // or may not iterate over those objects. The caller must create a new
1315 // iterator in order to be sure to visit these new objects.
1316 class HeapObjectIterator : public ObjectIterator {
1318 // Creates a new object iterator in a given space.
1319 explicit HeapObjectIterator(PagedSpace* space);
1320 explicit HeapObjectIterator(Page* page);
1322 // Advance to the next object, skipping free spaces and other fillers and
1323 // skipping the special garbage section of which there is one per space.
1324 // Returns NULL when the iteration has ended.
1325 inline HeapObject* Next();
1326 virtual inline HeapObject* next_object();
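// Typical usage (sketch; the space accessor is illustrative only):
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }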
1329 enum PageMode { kOnePageOnly, kAllPagesInSpace };
1331 Address cur_addr_; // Current iteration point.
1332 Address cur_end_; // End iteration point.
1334 PageMode page_mode_;
1336 // Fast (inlined) path of next().
1337 inline HeapObject* FromCurrentPage();
1339 // Slow path of next(), goes into the next page. Returns false if the
1340 // iteration has ended.
1341 bool AdvanceToNextPage();
1343 // Initializes fields.
1344 inline void Initialize(PagedSpace* owner, Address start, Address end,
1349 // -----------------------------------------------------------------------------
1350 // A PageIterator iterates the pages in a paged space.
1352 class PageIterator BASE_EMBEDDED {
1354 explicit inline PageIterator(PagedSpace* space);
1356 inline bool has_next();
1357 inline Page* next();
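// Typical usage (sketch):
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }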
1361 Page* prev_page_; // Previous page returned.
1362 // Next page that will be returned. Cached here so that we can use this
1363 // iterator for operations that deallocate pages.
1368 // -----------------------------------------------------------------------------
1369 // A space has a circular list of pages. The next page can be accessed via
1370 // Page::next_page().
1372 // An abstraction of allocation and relocation pointers in a page-structured
1373 // space.
1374 class AllocationInfo {
1376 AllocationInfo() : top_(NULL), limit_(NULL) {}
1378 INLINE(void set_top(Address top)) {
1379 SLOW_DCHECK(top == NULL ||
1380 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1384 INLINE(Address top()) const {
1385 SLOW_DCHECK(top_ == NULL ||
1386 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1390 Address* top_address() { return &top_; }
1392 INLINE(void set_limit(Address limit)) {
1393 SLOW_DCHECK(limit == NULL ||
1394 (reinterpret_cast<intptr_t>(limit) & kHeapObjectTagMask) == 0);
1398 INLINE(Address limit()) const {
1399 SLOW_DCHECK(limit_ == NULL ||
1400 (reinterpret_cast<intptr_t>(limit_) & kHeapObjectTagMask) ==
1405 Address* limit_address() { return &limit_; }
1408 bool VerifyPagedAllocation() {
1409 return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
1415 // Current allocation top.
1417 // Current allocation limit.
1422 // An abstraction of the accounting statistics of a page-structured space.
1423 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1424 // including page bookkeeping structures) currently in the space. The 'size'
1425 // of a space is the number of allocated bytes, the 'waste' in the space is
1426 // the number of bytes that are not allocated and not available to
1427 // allocation without reorganizing the space via a GC (e.g. small blocks due
1428 // to internal fragmentation, top of page areas in map space), and the bytes
1429 // 'available' is the number of unallocated bytes that are not waste. The
1430 // capacity is the sum of size, waste, and available.
1432 // The stats are only set by functions that ensure they stay balanced. These
1433 // functions increase or decrease one of the non-capacity stats in
1434 // conjunction with capacity, or else they always balance increases and
1435 // decreases to the non-capacity stats.
1436 class AllocationStats BASE_EMBEDDED {
1438 AllocationStats() { Clear(); }
1440 // Zero out all the allocation statistics (i.e., no capacity).
1448 void ClearSizeWaste() {
1453 // Reset the allocation statistics (i.e., available = capacity with no
1454 // wasted or allocated bytes).
1460 // Accessors for the allocation statistics.
1461 intptr_t Capacity() { return capacity_; }
1462 intptr_t MaxCapacity() { return max_capacity_; }
1463 intptr_t Size() { return size_; }
1464 intptr_t Waste() { return waste_; }
1466 // Grow the space by adding available bytes. They are initially marked as
1467 // being in use (part of the size), but will normally be immediately freed,
1468 // putting them on the free list and removing them from size_.
1469 void ExpandSpace(int size_in_bytes) {
1470 capacity_ += size_in_bytes;
1471 size_ += size_in_bytes;
1472 if (capacity_ > max_capacity_) {
1473 max_capacity_ = capacity_;
1478 // Shrink the space by removing available bytes. Since shrinking is done
1479 // during sweeping, bytes have been marked as being in use (part of the size)
1480 // and are hereby freed.
1481 void ShrinkSpace(int size_in_bytes) {
1482 capacity_ -= size_in_bytes;
1483 size_ -= size_in_bytes;
1487 // Allocate from available bytes (available -> size).
1488 void AllocateBytes(intptr_t size_in_bytes) {
1489 size_ += size_in_bytes;
1493 // Free allocated bytes, making them available (size -> available).
1494 void DeallocateBytes(intptr_t size_in_bytes) {
1495 size_ -= size_in_bytes;
1499 // Waste free bytes (available -> waste).
1500 void WasteBytes(int size_in_bytes) {
1501 DCHECK(size_in_bytes >= 0);
1502 waste_ += size_in_bytes;
1505 // Merge {other} into {this}.
1506 void Merge(const AllocationStats& other) {
1507 capacity_ += other.capacity_;
1508 size_ += other.size_;
1509 waste_ += other.waste_;
1510 if (other.max_capacity_ > max_capacity_) {
1511 max_capacity_ = other.max_capacity_;
1515 void DecreaseCapacity(intptr_t size_in_bytes) {
1516 capacity_ -= size_in_bytes;
1517 DCHECK_GE(capacity_, 0);
1520 void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
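// Rough flow of the accounting described above (variable names are
// illustrative, not a verbatim call sequence from the collector):
//
//   stats.ExpandSpace(page_area_bytes);    // new page: capacity and size grow
//   stats.DeallocateBytes(free_bytes);     // sweeper: size -> available
//   stats.WasteBytes(unusable_bytes);      // too small to use: -> waste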
1524 intptr_t max_capacity_;
1530 // -----------------------------------------------------------------------------
1531 // Free lists for old object spaces
1533 // The free list category holds a pointer to the top element and a pointer to
1534 // the end element of the linked list of free memory blocks.
1535 class FreeListCategory {
1537 explicit FreeListCategory(FreeList* owner)
1538 : top_(0), end_(NULL), available_(0), owner_(owner) {}
1540 intptr_t Concatenate(FreeListCategory* category);
1544 void Free(FreeSpace* node, int size_in_bytes);
1546 FreeSpace* PickNodeFromList(int* node_size);
1547 FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
1549 intptr_t EvictFreeListItemsInList(Page* p);
1550 bool ContainsPageFreeListItemsInList(Page* p);
1552 void RepairFreeList(Heap* heap);
1554 FreeSpace* top() const {
1555 return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
1558 void set_top(FreeSpace* top) {
1559 base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
1562 FreeSpace* end() const { return end_; }
1563 void set_end(FreeSpace* end) { end_ = end; }
1565 int* GetAvailableAddress() { return &available_; }
1566 int available() const { return available_; }
1567 void set_available(int available) { available_ = available; }
1569 base::Mutex* mutex() { return &mutex_; }
1571 bool IsEmpty() { return top() == 0; }
1574 intptr_t SumFreeList();
1575 int FreeListLength();
1578 FreeList* owner() { return owner_; }
1581 // top_ points to the top FreeSpace* in the free list category.
1582 base::AtomicWord top_;
1586 // Total available bytes in all blocks of this free list category.
1593 // The free list for the old space. The free list is organized in such a way
1594 // as to encourage objects allocated around the same time to be near each
1595 // other. The normal way to allocate is intended to be by bumping a 'top'
1596 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1597 // find a new space to allocate from. This is done with the free list, which
1598 // is divided up into rough categories to cut down on waste. Having finer
1599 // categories would scatter allocation more.
1601 // The old space free list is organized in categories.
1602 // 1-31 words: Such small free areas are discarded for efficiency reasons.
1603 // They can be reclaimed by the compactor. However the distance between top
1604 // and limit may be this small.
1605 // 32-255 words: There is a list of spaces this large. It is used for top and
1606 // limit when the object we need to allocate is 1-31 words in size. These
1607 // spaces are called small.
1608 // 256-2047 words: There is a list of spaces this large. It is used for top and
1609 // limit when the object we need to allocate is 32-255 words in size. These
1610 // spaces are called medium.
1611 // 2048-16383 words: There is a list of spaces this large. It is used for top
1612 // and limit when the object we need to allocate is 256-2047 words in size.
1613 // These spaces are called large.
1614 // At least 16384 words. This list is for objects of 2048 words or larger.
1615 // Empty pages are added to this list. These spaces are called huge.
1618 explicit FreeList(PagedSpace* owner);
1620 intptr_t Concatenate(FreeList* free_list);
1622 // Clear the free list.
1625 // Return the number of bytes available on the free list.
1626 intptr_t available() {
1627 return small_list_.available() + medium_list_.available() +
1628 large_list_.available() + huge_list_.available();
1631 // Place a node on the free list. The block of size 'size_in_bytes'
1632 // starting at 'start' is placed on the free list. The return value is the
1633 // number of bytes that have been lost due to internal fragmentation by
1634 // freeing the block. Bookkeeping information will be written to the block,
1635 // i.e., its contents will be destroyed. The start address should be word
1636 // aligned, and the size should be a non-zero multiple of the word size.
1637 int Free(Address start, int size_in_bytes);
1639 // This method returns how much memory can be allocated after freeing
1640 // maximum_freed memory.
1641 static inline int GuaranteedAllocatable(int maximum_freed) {
1642 if (maximum_freed <= kSmallListMin) {
1644 } else if (maximum_freed <= kSmallListMax) {
1645 return kSmallAllocationMax;
1646 } else if (maximum_freed <= kMediumListMax) {
1647 return kMediumAllocationMax;
1648 } else if (maximum_freed <= kLargeListMax) {
1649 return kLargeAllocationMax;
1651 return maximum_freed;
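// Worked example using the list bounds defined below: if the largest block
// freed on a page is 200 words it lands on the small list (32-255 words), so
// only small allocations can be guaranteed from it afterwards:
//
//   int bytes = FreeList::GuaranteedAllocatable(200 * kPointerSize);
//   // bytes == kSmallAllocationMax == 31 * kPointerSize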
1654 // Allocate a block of size 'size_in_bytes' from the free list. The block
1655 // is uninitialized. A failure is returned if no block is available. The
1656 // number of bytes lost to fragmentation is returned in the output parameter
1657 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1658 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1661 return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1662 large_list_.IsEmpty() && huge_list_.IsEmpty();
1667 intptr_t SumFreeLists();
1671 // Used after booting the VM.
1672 void RepairLists(Heap* heap);
1674 intptr_t EvictFreeListItems(Page* p);
1675 bool ContainsPageFreeListItems(Page* p);
1677 FreeListCategory* small_list() { return &small_list_; }
1678 FreeListCategory* medium_list() { return &medium_list_; }
1679 FreeListCategory* large_list() { return &large_list_; }
1680 FreeListCategory* huge_list() { return &huge_list_; }
1682 PagedSpace* owner() { return owner_; }
1685 // The size range of blocks, in bytes.
1686 static const int kMinBlockSize = 3 * kPointerSize;
1687 static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1689 static const int kSmallListMin = 0x1f * kPointerSize;
1690 static const int kSmallListMax = 0xff * kPointerSize;
1691 static const int kMediumListMax = 0x7ff * kPointerSize;
1692 static const int kLargeListMax = 0x3fff * kPointerSize;
1693 static const int kSmallAllocationMax = kSmallListMin;
1694 static const int kMediumAllocationMax = kSmallListMax;
1695 static const int kLargeAllocationMax = kMediumListMax;
1697 FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1701 FreeListCategory small_list_;
1702 FreeListCategory medium_list_;
1703 FreeListCategory large_list_;
1704 FreeListCategory huge_list_;
1706 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1710 class AllocationResult {
1712 // Implicit constructor from Object*.
1713 AllocationResult(Object* object) // NOLINT
1715 // AllocationResults can't return Smis, which are used to represent
1716 // failure and the space to retry in.
1717 CHECK(!object->IsSmi());
1720 AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
1722 static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1723 return AllocationResult(space);
1726 inline bool IsRetry() { return object_->IsSmi(); }
1728 template <typename T>
1730 if (IsRetry()) return false;
1731 *obj = T::cast(object_);
1735 Object* ToObjectChecked() {
1740 inline AllocationSpace RetrySpace();
1743 explicit AllocationResult(AllocationSpace space)
1744 : object_(Smi::FromInt(static_cast<int>(space))) {}
1750 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
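// Typical usage (sketch; the allocating call and its arguments are
// illustrative):
//
//   AllocationResult allocation = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!allocation.To(&object)) {
//     // Failed; retry in the space given by allocation.RetrySpace(),
//     // typically after a GC.
//   }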
1753 class PagedSpace : public Space {
1755 // Creates a space with an id.
1756 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1758 virtual ~PagedSpace() { TearDown(); }
1760 // Set up the space using the given address range of virtual memory (from
1761 // the memory allocator's initial chunk) if possible. If the block of
1762 // addresses is not big enough to contain a single page-aligned page, a
1763 // fresh chunk will be allocated.
1766 // Returns true if the space has been successfully set up and not
1767 // subsequently torn down.
1768 bool HasBeenSetUp();
1770 // Checks whether an object/address is in this space.
1771 inline bool Contains(Address a);
1772 inline bool Contains(HeapObject* o);
1773 // Unlike Contains() methods it is safe to call this one even for addresses
1774 // of unmapped memory.
1775 bool ContainsSafe(Address addr);
1777 // Given an address occupied by a live object, return that object if it is
1778 // in this space, or a Smi if it is not. The implementation iterates over
1779 // objects in the page containing the address, the cost is linear in the
1780 // number of objects in the page. It may be slow.
1781 Object* FindObject(Address addr);
1783 // During boot the free_space_map is created, and afterwards we may need
1784 // to write it into the free list nodes that were already created.
1785 void RepairFreeListsAfterDeserialization();
1787 // Prepares for a mark-compact GC.
1788 void PrepareForMarkCompact();
1790 // Current capacity without growing (Size() + Available()).
1791 intptr_t Capacity() { return accounting_stats_.Capacity(); }
1793 // Total amount of memory committed for this space. For paged
1794 // spaces this equals the capacity.
1795 intptr_t CommittedMemory() override { return Capacity(); }
1797 // The maximum amount of memory ever committed for this space.
1798 intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1800 // Approximate amount of physical memory committed for this space.
1801 size_t CommittedPhysicalMemory() override;
1803 void ResetFreeListStatistics();
1805 // Sets the capacity, the available space and the wasted space to zero.
1806 // The stats are rebuilt during sweeping by adding each page to the
1807 // capacity and the size when it is encountered. As free spaces are
1808 // discovered during the sweeping they are subtracted from the size and added
1809 // to the available and wasted totals.
1811 accounting_stats_.ClearSizeWaste();
1812 ResetFreeListStatistics();
1815 // Increases the number of available bytes of that space.
1816 void AddToAccountingStats(intptr_t bytes) {
1817 accounting_stats_.DeallocateBytes(bytes);
1820 // Available bytes without growing. These are the bytes on the free list.
1821 // The bytes in the linear allocation area are not included in this total
1822 // because updating the stats would slow down allocation. New pages are
1823 // immediately added to the free list so they show up here.
1824 intptr_t Available() override { return free_list_.available(); }
1826 // Allocated bytes in this space. Garbage bytes that were not found due to
1827 // concurrent sweeping are counted as being allocated! The bytes in the
1828 // current linear allocation area (between top and limit) are also counted as allocated.
1830 intptr_t Size() override { return accounting_stats_.Size(); }
1832 // As Size(), but the bytes in lazily swept pages are estimated and the bytes
1833 // in the current linear allocation area are not included.
1834 intptr_t SizeOfObjects() override;
1836 // Wasted bytes in this space. These are just the bytes that were thrown away
1837 // due to being too small to use for allocation. They do not include the
1838 // free bytes that were not found at all due to lazy sweeping.
1839 virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1841 // Returns the allocation pointer in this space.
1842 Address top() { return allocation_info_.top(); }
1843 Address limit() { return allocation_info_.limit(); }
1845 // The allocation top address.
1846 Address* allocation_top_address() { return allocation_info_.top_address(); }
1848 // The allocation limit address.
1849 Address* allocation_limit_address() {
1850 return allocation_info_.limit_address();
1853 // Allocate the requested number of bytes in the space if possible, return a
1854 // failure object if not.
1855 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
1858 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
1861 // Allocate the requested number of bytes in the space double aligned if
1862 // possible, return a failure object if not.
1863 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1864 int size_in_bytes, AllocationAlignment alignment);
1866 // Allocate the requested number of bytes in the space and consider allocation
1867 // alignment if needed.
1868 MUST_USE_RESULT inline AllocationResult AllocateRaw(
1869 int size_in_bytes, AllocationAlignment alignment);
1871 // Give a block of memory to the space's free list. It might be added to
1872 // the free list or accounted as waste.
1873 // If add_to_freelist is false then just accounting stats are updated and
1874 // no attempt to add area to free list is made.
1875 int Free(Address start, int size_in_bytes) {
1876 int wasted = free_list_.Free(start, size_in_bytes);
1877 accounting_stats_.DeallocateBytes(size_in_bytes);
1878 accounting_stats_.WasteBytes(wasted);
1879 return size_in_bytes - wasted;
1882 void ResetFreeList() { free_list_.Reset(); }
1884 // Set space allocation info.
1885 void SetTopAndLimit(Address top, Address limit) {
1886 DCHECK(top == limit ||
1887 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1888 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1889 allocation_info_.set_top(top);
1890 allocation_info_.set_limit(limit);
1893 // Empty space allocation info, returning unused area to free list.
1894 void EmptyAllocationInfo() {
1895 // Mark the old linear allocation area with a free space map so it can be
1896 // skipped when scanning the heap.
1897 int old_linear_size = static_cast<int>(limit() - top());
1898 Free(top(), old_linear_size);
1899 SetTopAndLimit(NULL, NULL);
1902 void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
1904 void IncreaseCapacity(int size);
1906 // Releases an unused page and shrinks the space.
1907 void ReleasePage(Page* page);
1909 // The dummy page that anchors the linked list of pages.
1910 Page* anchor() { return &anchor_; }
1913 // Verify integrity of this space.
1914 virtual void Verify(ObjectVisitor* visitor);
1916 // Overridden by subclasses to verify space-specific object
1917 // properties (e.g., only maps or free-list nodes are in map space).
1918 virtual void VerifyObject(HeapObject* obj) {}
1922 // Print meta info and objects in this space.
1923 void Print() override;
1925 // Reports statistics for the space
1926 void ReportStatistics();
1928 // Report code object related statistics
1929 void CollectCodeStatistics();
1930 static void ReportCodeStatistics(Isolate* isolate);
1931 static void ResetCodeStatistics(Isolate* isolate);
1934 // Evacuation candidates are swept by the evacuator. This needs to return a valid
1935 // result before _and_ after evacuation has finished.
1936 static bool ShouldBeSweptBySweeperThreads(Page* p) {
1937 return !p->IsEvacuationCandidate() &&
1938 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
1941 void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
1943 void IncreaseUnsweptFreeBytes(Page* p) {
1944 DCHECK(ShouldBeSweptBySweeperThreads(p));
1945 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1948 void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
1950 void DecreaseUnsweptFreeBytes(Page* p) {
1951 DCHECK(ShouldBeSweptBySweeperThreads(p));
1952 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1955 void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
1957 // This function tries to steal size_in_bytes memory from the sweeper threads
1958 // free-lists. If it does not succeed in stealing enough memory, it will wait
1959 // for the sweeper threads to finish sweeping.
1960 // It returns true when sweeping is completed and false otherwise.
1961 bool EnsureSweeperProgress(intptr_t size_in_bytes);
1963 void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
1965 Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
1967 Page* FirstPage() { return anchor_.next_page(); }
1968 Page* LastPage() { return anchor_.prev_page(); }
1970 void EvictEvacuationCandidatesFromFreeLists();
1972 bool CanExpand(size_t size);
1974 // Returns the number of total pages in this space.
1975 int CountTotalPages();
1977 // Returns the size of the allocatable area on a page in this space.
1978 inline int AreaSize() { return area_size_; }
1980 // Merges {other} into the current space. Note that this modifies {other},
1981 // e.g., removes its bump pointer area and resets statistics.
1982 void MergeCompactionSpace(CompactionSpace* other);
1984 void MoveOverFreeMemory(PagedSpace* other);
1986 virtual bool is_local() { return false; }
1989 // PagedSpaces that should be included in snapshots have different, i.e.,
1990 // smaller, initial pages.
1991 virtual bool snapshotable() { return true; }
1993 FreeList* free_list() { return &free_list_; }
1995 bool HasPages() { return anchor_.next_page() != &anchor_; }
1997 // Cleans up the space, frees all pages in this space except those belonging
1998 // to the initial chunk, uncommits addresses in the initial chunk.
2001 // Expands the space by allocating a fixed number of pages. Returns false if
2002 // it cannot allocate the requested number of pages from the OS, or if the hard heap
2003 // size limit has been hit.
2006 // Generic fast case allocation function that tries linear allocation at the
2007 // address denoted by top in allocation_info_.
2008 inline HeapObject* AllocateLinearly(int size_in_bytes);
2010 // Generic fast case allocation function that tries aligned linear allocation
2011 // at the address denoted by top in allocation_info_. Writes the aligned
2012 // allocation size, which includes the filler size, to size_in_bytes.
2013 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2014 AllocationAlignment alignment);
2016 // If sweeping is still in progress, try to sweep unswept pages. If that is
2017 // not successful, wait for the sweeper threads and re-try free-list allocation.
2019 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
2022 // Slow path of AllocateRaw. This function is space-dependent.
2023 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2027 // Accounting information for this space.
2028 AllocationStats accounting_stats_;
2030 // The dummy page that anchors the double linked list of pages.
2033 // The space's free list.
2034 FreeList free_list_;
2036 // Normal allocation information.
2037 AllocationInfo allocation_info_;
2039 // The number of free bytes which could be reclaimed by advancing the
2040 // concurrent sweeper threads.
2041 intptr_t unswept_free_bytes_;
2043 // The sweeper threads iterate over the list of pointer and data space pages
2044 // and sweep these pages concurrently. They will stop sweeping after the
2045 // end_of_unswept_pages_ page.
2046 Page* end_of_unswept_pages_;
2048 // Mutex guarding any concurrent access to the space.
2049 base::Mutex space_mutex_;
2051 friend class MarkCompactCollector;
2052 friend class PageIterator;
2056 class NumberAndSizeInfo BASE_EMBEDDED {
2058 NumberAndSizeInfo() : number_(0), bytes_(0) {}
2060 int number() const { return number_; }
2061 void increment_number(int num) { number_ += num; }
2063 int bytes() const { return bytes_; }
2064 void increment_bytes(int size) { bytes_ += size; }
2077 // HistogramInfo class for recording a single "bar" of a histogram. This
2078 // class is used for collecting statistics to print to the log file.
2079 class HistogramInfo : public NumberAndSizeInfo {
2081 HistogramInfo() : NumberAndSizeInfo() {}
2083 const char* name() { return name_; }
2084 void set_name(const char* name) { name_ = name; }
2091 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2097 class NewSpacePage : public MemoryChunk {
2099 // GC-related flags copied from from-space to to-space when
2100 // flipping semispaces.
2101 static const intptr_t kCopyOnFlipFlagsMask =
2102 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2103 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2104 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2106 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2108 inline NewSpacePage* next_page() {
2109 return static_cast<NewSpacePage*>(next_chunk());
2112 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
2114 inline NewSpacePage* prev_page() {
2115 return static_cast<NewSpacePage*>(prev_chunk());
2118 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
2120 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
2122 bool is_anchor() { return !this->InNewSpace(); }
2124 static bool IsAtStart(Address addr) {
2125 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2129 static bool IsAtEnd(Address addr) {
2130 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2133 Address address() { return reinterpret_cast<Address>(this); }
2135 // Finds the NewSpacePage containing the given address.
2136 static inline NewSpacePage* FromAddress(Address address_in_page) {
2137 Address page_start =
2138 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2139 ~Page::kPageAlignmentMask);
2140 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2144 // Find the page for a limit address. A limit address is either an address
2145 // inside a page, or the address right after the last byte of a page.
2146 static inline NewSpacePage* FromLimit(Address address_limit) {
2147 return NewSpacePage::FromAddress(address_limit - 1);
2150 // Checks if address1 and address2 are on the same new space page.
2151 static inline bool OnSamePage(Address address1, Address address2) {
2152 return NewSpacePage::FromAddress(address1) ==
2153 NewSpacePage::FromAddress(address2);
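// Illustrative sketch (hypothetical numbers, not from this header): the
// helpers above rely on pages being aligned to the page size, so masking off
// the low kPageAlignmentMask bits of any interior address recovers the page
// start. Assuming, purely for the example, a 1 MB page size:
//
//   const uintptr_t kExamplePageSize = 1 << 20;        // 1 MB
//   const uintptr_t kExampleMask     = kExamplePageSize - 1;
//   uintptr_t interior   = 0x40123456;
//   uintptr_t page_start = interior & ~kExampleMask;   // 0x40100000
//
// FromLimit() subtracts one byte first because a limit address may be exactly
// one past the end of a page, which is the start of the next page; (limit - 1)
// is guaranteed to lie inside the intended page.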
2157 // Create a NewSpacePage object that is only used as anchor
2158 // for the doubly-linked list of real pages.
2159 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
2161 static NewSpacePage* Initialize(Heap* heap, Address start,
2162 SemiSpace* semi_space);
2164 // Initialize a fake NewSpacePage used as a sentinel at the ends
2165 // of a doubly-linked list of real NewSpacePages.
2166 // Only uses the prev/next links, and sets flags to not be in new-space.
2167 void InitializeAsAnchor(SemiSpace* owner);
2169 friend class SemiSpace;
2170 friend class SemiSpaceIterator;
2174 // -----------------------------------------------------------------------------
2175 // SemiSpace in young generation
2177 // A semispace is a contiguous chunk of memory holding page-like memory
2178 // chunks. The mark-compact collector uses the memory of the first page in
2179 // the from space as a marking stack when tracing live objects.
2181 class SemiSpace : public Space {
2184 SemiSpace(Heap* heap, SemiSpaceId semispace)
2185 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2190 current_page_(NULL) {}
2192 // Sets up the semispace using the given chunk.
2193 void SetUp(Address start, int initial_capacity, int target_capacity,
2194 int maximum_capacity);
2196 // Tear down the space. Heap memory was not allocated by the space, so it
2197 // is not deallocated here.
2200 // True if the space has been set up but not torn down.
2201 bool HasBeenSetUp() { return start_ != NULL; }
2203 // Grow the semispace to the new capacity. The new capacity
2204 // requested must be larger than the current capacity and less than
2205 // the maximum capacity.
2206 bool GrowTo(int new_capacity);
2208 // Shrinks the semispace to the new capacity. The new capacity
2209 // requested must be more than the amount of used memory in the
2210 // semispace and less than the current capacity.
2211 bool ShrinkTo(int new_capacity);
2213 // Sets the total capacity. Only possible when the space is not committed.
2214 bool SetTotalCapacity(int new_capacity);
2216 // Returns the start address of the first page of the space.
2217 Address space_start() {
2218 DCHECK(anchor_.next_page() != &anchor_);
2219 return anchor_.next_page()->area_start();
2222 // Returns the start address of the current page of the space.
2223 Address page_low() { return current_page_->area_start(); }
2225 // Returns one past the end address of the space.
2226 Address space_end() { return anchor_.prev_page()->area_end(); }
2228 // Returns one past the end address of the current page of the space.
2229 Address page_high() { return current_page_->area_end(); }
2231 bool AdvancePage() {
2232 NewSpacePage* next_page = current_page_->next_page();
2233 if (next_page == anchor()) return false;
2234 current_page_ = next_page;
2238 // Resets the space to using the first page.
2241 // Age mark accessors.
2242 Address age_mark() { return age_mark_; }
2243 void set_age_mark(Address mark);
2245 // True if the address is in the address range of this semispace (not
2246 // necessarily below the allocation pointer).
2247 bool Contains(Address a) {
2248 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2249 reinterpret_cast<uintptr_t>(start_);
2252 // True if the object is a heap object in the address range of this
2253 // semispace (not necessarily below the allocation pointer).
2254 bool Contains(Object* o) {
2255 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
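// Illustrative sketch (hypothetical values, not from this header): the
// containment test works because a semispace occupies an aligned,
// power-of-two-sized address range, so address_mask_ clears exactly the
// offset bits. Assuming a semispace at 0x08000000 with an 8 MB maximum
// capacity:
//
//   uintptr_t start = 0x08000000;
//   uintptr_t mask  = ~(uintptr_t)(8 * 1024 * 1024 - 1);
//   (0x081ABCDE & mask) == start   // true: inside the range
//   (0x09000000 & mask) == start   // false: outside the range
//
// The object_mask_/object_expected_ variant applies the same idea to tagged
// object pointers.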
2258 // If we don't have these here, then SemiSpace will be abstract. However,
2259 // they should never be called:
2261 intptr_t Size() override {
2266 intptr_t SizeOfObjects() override { return Size(); }
2268 intptr_t CommittedMemory() override {
2273 intptr_t Available() override {
2279 bool is_committed() { return committed_; }
2283 NewSpacePage* first_page() { return anchor_.next_page(); }
2284 NewSpacePage* current_page() { return current_page_; }
2287 virtual void Verify();
2291 void Print() override;
2292 // Validate a range of addresses in a SemiSpace.
2293 // The "from" address must be on a page prior to the "to" address,
2294 // in the linked page order, or it must be earlier on the same page.
2295 static void AssertValidRange(Address from, Address to);
2298 inline static void AssertValidRange(Address from, Address to) {}
2301 // Returns the current total capacity of the semispace.
2302 int TotalCapacity() { return total_capacity_; }
2304 // Returns the target for total capacity of the semispace.
2305 int TargetCapacity() { return target_capacity_; }
2307 // Returns the maximum total capacity of the semispace.
2308 int MaximumTotalCapacity() { return maximum_total_capacity_; }
2310 // Returns the initial capacity of the semispace.
2311 int InitialTotalCapacity() { return initial_total_capacity_; }
2313 SemiSpaceId id() { return id_; }
2315 static void Swap(SemiSpace* from, SemiSpace* to);
2317 // Returns the maximum amount of memory ever committed by the semispace.
2318 size_t MaximumCommittedMemory() { return maximum_committed_; }
2320 // Approximate amount of physical memory committed for this space.
2321 size_t CommittedPhysicalMemory() override;
2324 // Flips the semispace between being from-space and to-space.
2325 // Copies the flags into the masked positions on all pages in the space.
2326 void FlipPages(intptr_t flags, intptr_t flag_mask);
2328 // Updates Capacity and MaximumCommitted based on new capacity.
2329 void SetCapacity(int new_capacity);
2331 NewSpacePage* anchor() { return &anchor_; }
2333 // The current and maximum total capacity of the space.
2334 int total_capacity_;
2335 int target_capacity_;
2336 int maximum_total_capacity_;
2337 int initial_total_capacity_;
2339 intptr_t maximum_committed_;
2341 // The start address of the space.
2343 // Used to govern object promotion during mark-compact collection.
2346 // Masks and comparison values to test for containment in this semispace.
2347 uintptr_t address_mask_;
2348 uintptr_t object_mask_;
2349 uintptr_t object_expected_;
2354 NewSpacePage anchor_;
2355 NewSpacePage* current_page_;
2357 friend class SemiSpaceIterator;
2358 friend class NewSpacePageIterator;
2362 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2363 // semispace of the heap's new space. It iterates over the objects in the
2364 // semispace from a given start address (defaulting to the bottom of the
2365 // semispace) to the top of the semispace. New objects allocated after the
2366 // iterator is created are not iterated.
2367 class SemiSpaceIterator : public ObjectIterator {
2369 // Create an iterator over the allocated objects in the given to-space.
2370 explicit SemiSpaceIterator(NewSpace* space);
2372 inline HeapObject* Next();
2374 // Implementation of the ObjectIterator functions.
2375 virtual inline HeapObject* next_object();
2378 void Initialize(Address start, Address end);
2380 // The current iteration point.
2382 // The end of iteration.
2387 // -----------------------------------------------------------------------------
2388 // A PageIterator iterates the pages in a semi-space.
2389 class NewSpacePageIterator BASE_EMBEDDED {
2391 // Make an iterator that runs over all pages in to-space.
2392 explicit inline NewSpacePageIterator(NewSpace* space);
2394 // Make an iterator that runs over all pages in the given semispace,
2395 // even those not used in allocation.
2396 explicit inline NewSpacePageIterator(SemiSpace* space);
2398 // Make iterator that iterates from the page containing start
2399 // to the page that contains limit in the same semispace.
2400 inline NewSpacePageIterator(Address start, Address limit);
2402 inline bool has_next();
2403 inline NewSpacePage* next();
2406 NewSpacePage* prev_page_; // Previous page returned.
2407 // Next page that will be returned. Cached here so that we can use this
2408 // iterator for operations that deallocate pages.
2409 NewSpacePage* next_page_;
2410 // Last page returned.
2411 NewSpacePage* last_page_;
2415 // -----------------------------------------------------------------------------
2416 // The young generation space.
2418 // The new space consists of a contiguous pair of semispaces. It simply
2419 // forwards most functions to the appropriate semispace.
2421 class NewSpace : public Space {
2424 explicit NewSpace(Heap* heap)
2425 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2426 to_space_(heap, kToSpace),
2427 from_space_(heap, kFromSpace),
2429 inline_allocation_limit_step_(0),
2430 top_on_previous_step_(0) {}
2432 // Sets up the new space using the given chunk.
2433 bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2435 // Tears down the space. Heap memory was not allocated by the space, so it
2436 // is not deallocated here.
2439 // True if the space has been set up but not torn down.
2440 bool HasBeenSetUp() {
2441 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2444 // Flip the pair of spaces.
2447 // Grow the capacity of the semispaces. Assumes that they are not at
2448 // their maximum capacity.
2451 // Grow the capacity of the semispaces by one page.
2454 // Shrink the capacity of the semispaces.
2457 // True if the address or object lies in the address range of either
2458 // semispace (not necessarily below the allocation pointer).
2459 bool Contains(Address a) {
2460 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2461 reinterpret_cast<uintptr_t>(start_);
2464 bool Contains(Object* o) {
2465 Address a = reinterpret_cast<Address>(o);
2466 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2469 // Return the allocated bytes in the active semispace.
2470 intptr_t Size() override {
2471 return pages_used_ * NewSpacePage::kAreaSize +
2472 static_cast<int>(top() - to_space_.page_low());
2475 // The same, but returning an int. We have to have the one that returns
2476 // intptr_t because it is inherited, but if we know we are dealing with the
2477 // new space, which cannot get as big as the other spaces, this is useful:
2478 int SizeAsInt() { return static_cast<int>(Size()); }
2480 // Return the allocatable capacity of a semispace.
2481 intptr_t Capacity() {
2482 SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2483 return (to_space_.TotalCapacity() / Page::kPageSize) *
2484 NewSpacePage::kAreaSize;
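// Illustrative note (hypothetical numbers, not from this header): Capacity()
// reports only the allocatable part of the committed semispace. With, say, a
// 1 MB page size and an 8 MB TotalCapacity(), the semispace holds 8 pages and
//
//   Capacity() == 8 * NewSpacePage::kAreaSize
//
// i.e. the 8 MB minus the per-page header overhead that objects never occupy.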
2487 // Return the current size of a semispace, allocatable and non-allocatable memory.
2489 intptr_t TotalCapacity() {
2490 DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2491 return to_space_.TotalCapacity();
2494 // Return the total amount of memory committed for new space.
2495 intptr_t CommittedMemory() override {
2496 if (from_space_.is_committed()) return 2 * Capacity();
2497 return TotalCapacity();
2500 // Return the maximum amount of memory ever committed for new space.
2501 intptr_t MaximumCommittedMemory() {
2502 return to_space_.MaximumCommittedMemory() +
2503 from_space_.MaximumCommittedMemory();
2506 // Approximate amount of physical memory committed for this space.
2507 size_t CommittedPhysicalMemory() override;
2509 // Return the available bytes without growing.
2510 intptr_t Available() override { return Capacity() - Size(); }
2512 intptr_t PagesFromStart(Address addr) {
2513 return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
2516 size_t AllocatedSinceLastGC() {
2517 intptr_t allocated = top() - to_space_.age_mark();
2518 if (allocated < 0) {
2519 // Runtime has lowered the top below the age mark.
2522 // Correctly account for non-allocatable regions at the beginning of
2523 // each page from the age_mark() to the top().
2525 PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
2526 allocated -= pages * (NewSpacePage::kObjectStartOffset);
2527 DCHECK(0 <= allocated && allocated <= Size());
2528 return static_cast<size_t>(allocated);
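// Illustrative worked example (hypothetical numbers, not from this header):
// suppose the age mark sits near the start of page 0 and top is on page 3,
// with top() - age_mark() == 3 MB. Each page crossed since the last GC hides
// a non-allocatable header of NewSpacePage::kObjectStartOffset bytes, so with
// PagesFromStart(top()) - PagesFromStart(age_mark()) == 3 the result is
//
//   allocated = 3 MB - 3 * NewSpacePage::kObjectStartOffset
//
// i.e. the raw pointer difference corrected by one header per page boundary.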
2531 // Return the maximum capacity of a semispace.
2532 int MaximumCapacity() {
2533 DCHECK(to_space_.MaximumTotalCapacity() ==
2534 from_space_.MaximumTotalCapacity());
2535 return to_space_.MaximumTotalCapacity();
2538 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2540 // Returns the initial capacity of a semispace.
2541 int InitialTotalCapacity() {
2542 DCHECK(to_space_.InitialTotalCapacity() ==
2543 from_space_.InitialTotalCapacity());
2544 return to_space_.InitialTotalCapacity();
2547 // Return the address of the allocation pointer in the active semispace.
2549 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2550 return allocation_info_.top();
2553 // Return the address of the allocation pointer limit in the active semispace.
2555 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2556 return allocation_info_.limit();
2559 // Return the address of the first object in the active semispace.
2560 Address bottom() { return to_space_.space_start(); }
2562 // Get the age mark of the inactive semispace.
2563 Address age_mark() { return from_space_.age_mark(); }
2564 // Set the age mark in the active semispace.
2565 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2567 // The start address of the space and a bit mask. ANDing an address in the
2568 // new space with the mask will result in the start address.
2569 Address start() { return start_; }
2570 uintptr_t mask() { return address_mask_; }
2572 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2573 DCHECK(Contains(addr));
2574 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
2575 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2576 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2579 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2580 return reinterpret_cast<Address>(index << kPointerSizeLog2);
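// Illustrative sketch (hypothetical addresses, not from this header): each
// pointer-sized word of the new space maps to one mark bit. Assuming a 64-bit
// build (kPointerSize == 8, kPointerSizeLog2 == 3) and start_ == 0x08000000:
//
//   AddressToMarkbitIndex(0x08000040) == (0x40 >> 3) == 8
//
// i.e. the ninth word of the space owns mark bit index 8.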
2583 // The allocation top and limit address.
2584 Address* allocation_top_address() { return allocation_info_.top_address(); }
2586 // The allocation limit address.
2587 Address* allocation_limit_address() {
2588 return allocation_info_.limit_address();
2591 MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2592 int size_in_bytes, AllocationAlignment alignment));
2594 MUST_USE_RESULT INLINE(
2595 AllocationResult AllocateRawUnaligned(int size_in_bytes));
2597 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2598 int size_in_bytes, AllocationAlignment alignment));
2600 // Reset the allocation pointer to the beginning of the active semispace.
2601 void ResetAllocationInfo();
2603 void UpdateInlineAllocationLimit(int size_in_bytes);
2604 void LowerInlineAllocationLimit(intptr_t step) {
2605 inline_allocation_limit_step_ = step;
2606 UpdateInlineAllocationLimit(0);
2607 top_on_previous_step_ = step ? allocation_info_.top() : 0;
2610 // Get the extent of the inactive semispace (for use as a marking stack,
2611 // or to zap it). Notice: space-addresses are not necessarily on the
2612 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2613 Address FromSpacePageLow() { return from_space_.page_low(); }
2614 Address FromSpacePageHigh() { return from_space_.page_high(); }
2615 Address FromSpaceStart() { return from_space_.space_start(); }
2616 Address FromSpaceEnd() { return from_space_.space_end(); }
2618 // Get the extent of the active semispace's pages' memory.
2619 Address ToSpaceStart() { return to_space_.space_start(); }
2620 Address ToSpaceEnd() { return to_space_.space_end(); }
2622 inline bool ToSpaceContains(Address address) {
2623 return to_space_.Contains(address);
2625 inline bool FromSpaceContains(Address address) {
2626 return from_space_.Contains(address);
2629 // True if the object is a heap object in the address range of the
2630 // respective semispace (not necessarily below the allocation pointer of the
2632 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2633 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2635 // Try to switch the active semispace to a new, empty, page.
2636 // Returns false if this isn't possible or reasonable (i.e., there
2637 // are no pages, or the current page is already empty), or true if successful.
2639 bool AddFreshPage();
2642 // Verify the active semispace.
2643 virtual void Verify();
2647 // Print the active semispace.
2648 void Print() override { to_space_.Print(); }
2651 // Iterates the active semispace to collect statistics.
2652 void CollectStatistics();
2653 // Reports previously collected statistics of the active semispace.
2654 void ReportStatistics();
2655 // Clears previously collected statistics.
2656 void ClearHistograms();
2658 // Record the allocation or promotion of a heap object. Note that we don't
2659 // record every single allocation, but only those that happen in the
2660 // to space during a scavenge GC.
2661 void RecordAllocation(HeapObject* obj);
2662 void RecordPromotion(HeapObject* obj);
2664 // Return whether the operation succeeded.
2665 bool CommitFromSpaceIfNeeded() {
2666 if (from_space_.is_committed()) return true;
2667 return from_space_.Commit();
2670 bool UncommitFromSpace() {
2671 if (!from_space_.is_committed()) return true;
2672 return from_space_.Uncommit();
2675 bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2677 inline intptr_t inline_allocation_limit_step() {
2678 return inline_allocation_limit_step_;
2681 SemiSpace* active_space() { return &to_space_; }
2684 // Update allocation info to match the current to-space page.
2685 void UpdateAllocationInfo();
2687 Address chunk_base_;
2688 uintptr_t chunk_size_;
2691 SemiSpace to_space_;
2692 SemiSpace from_space_;
2693 base::VirtualMemory reservation_;
2696 // Start address and bit mask for containment testing.
2698 uintptr_t address_mask_;
2699 uintptr_t object_mask_;
2700 uintptr_t object_expected_;
2702 // Allocation pointer and limit for normal allocation and allocation during
2703 // mark-compact collection.
2704 AllocationInfo allocation_info_;
2706 // When incremental marking is active we will set allocation_info_.limit
2707 // to be lower than actual limit and then will gradually increase it
2708 // in steps to guarantee that we do incremental marking steps even
2709 // when all allocation is performed from inlined generated code.
2710 intptr_t inline_allocation_limit_step_;
2712 Address top_on_previous_step_;
2714 HistogramInfo* allocated_histogram_;
2715 HistogramInfo* promoted_histogram_;
2717 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2719 // If we are doing inline allocation in steps, this method performs the 'step'
2720 // operation. Right now incremental marking is the only consumer of inline
2721 // allocation steps. top is the memory address of the bump pointer at the last
2722 // inline allocation (i.e. it determines the number of bytes actually
2723 // allocated since the last step.) new_top is the address of the bump pointer
2724 // where the next byte is going to be allocated from. top and new_top may be
2725 // different when we cross a page boundary or reset the space.
2726 void InlineAllocationStep(Address top, Address new_top);
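// Illustrative sketch (not from the original header): with an inline
// allocation limit step of, say, 64 KB, allocation_info_.limit is kept close
// to top rather than at the real end of the page, so the generated
// bump-pointer fast path falls back to the runtime roughly every 64 KB of
// allocation. On that slow path InlineAllocationStep() is invoked and can
// credit top - top_on_previous_step_ bytes of allocation to incremental
// marking before the limit is moved forward again.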
2728 friend class SemiSpaceIterator;
2731 // -----------------------------------------------------------------------------
2732 // Compaction space that is used temporarily during compaction.
2734 class CompactionSpace : public PagedSpace {
2736 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2737 : PagedSpace(heap, id, executable) {}
2739 // Adds external memory starting at {start} of {size_in_bytes} to the space.
2740 void AddExternalMemory(Address start, int size_in_bytes) {
2741 IncreaseCapacity(size_in_bytes);
2742 Free(start, size_in_bytes);
2745 virtual bool is_local() { return true; }
2748 // The space is temporary and not included in any snapshots.
2749 virtual bool snapshotable() { return false; }
2753 // A collection of |CompactionSpace|s used by a single compaction task.
2754 class CompactionSpaceCollection : public Malloced {
2756 explicit CompactionSpaceCollection(Heap* heap)
2757 : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2758 code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2760 CompactionSpace* Get(AllocationSpace space) {
2765 return &code_space_;
2774 CompactionSpace old_space_;
2775 CompactionSpace code_space_;
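// Minimal sketch of how Get() presumably dispatches (its full body is elided
// above); this is a hypothetical reconstruction, not verbatim from the source:
//
//   switch (space) {
//     case OLD_SPACE:
//       return &old_space_;
//     case CODE_SPACE:
//       return &code_space_;
//     default:
//       UNREACHABLE();
//   }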
2779 // -----------------------------------------------------------------------------
2780 // Old object space (includes the old space of objects and code space)
2782 class OldSpace : public PagedSpace {
2784 // Creates an old space object. The constructor does not allocate pages from the OS.
2786 OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2787 : PagedSpace(heap, id, executable) {}
2791 // For contiguous spaces, top should be in the space (or at the end) and limit
2792 // should be the end of the space.
2793 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2794 SLOW_DCHECK((space).page_low() <= (info).top() && \
2795 (info).top() <= (space).page_high() && \
2796 (info).limit() <= (space).page_high())
2799 // -----------------------------------------------------------------------------
2800 // Old space for all map objects
2802 class MapSpace : public PagedSpace {
2804 // Creates a map space object.
2805 MapSpace(Heap* heap, AllocationSpace id)
2806 : PagedSpace(heap, id, NOT_EXECUTABLE),
2807 max_map_space_pages_(kMaxMapPageIndex - 1) {}
2809 // Given an index, returns the page address.
2810 // TODO(1600): this limit is artificial just to keep code compilable
2811 static const int kMaxMapPageIndex = 1 << 16;
2813 virtual int RoundSizeDownToObjectAlignment(int size) {
2814 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2815 return RoundDown(size, Map::kSize);
2817 return (size / Map::kSize) * Map::kSize;
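// Illustrative worked example (hypothetical Map::kSize, not from this
// header): if Map::kSize were 88 bytes (not a power of two), then
// RoundSizeDownToObjectAlignment(1000) == (1000 / 88) * 88 == 968, i.e. the
// largest multiple of the map size that fits. With a power-of-two Map::kSize
// the same result comes from the cheaper RoundDown() path.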
2822 virtual void VerifyObject(HeapObject* obj);
2825 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2827 // Do map space compaction if there is a page gap.
2828 int CompactionThreshold() {
2829 return kMapsPerPage * (max_map_space_pages_ - 1);
2832 const int max_map_space_pages_;
2836 // -----------------------------------------------------------------------------
2837 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2838 // the large object space. A large object is allocated from the OS heap with
2839 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2840 // A large object always starts at offset Page::kObjectStartOffset within a page.
2841 // Large objects do not move during garbage collections.
2843 class LargeObjectSpace : public Space {
2845 LargeObjectSpace(Heap* heap, AllocationSpace id);
2846 virtual ~LargeObjectSpace();
2848 // Initializes internal data structures.
2851 // Releases internal resources, frees objects in this space.
2854 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2855 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2856 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
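// Illustrative worked example (hypothetical sizes, not from this header):
// with a 1 MB Page::kPageSize, a 3 MB chunk reserved for a large object
// yields ObjectSizeFor(3 MB) == 3 MB - 1 MB - Page::kObjectStartOffset usable
// bytes, and any chunk no larger than the padding itself yields 0.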
2859 // Shared implementation of AllocateRaw, AllocateRawCode and
2860 // AllocateRawFixedArray.
2861 MUST_USE_RESULT AllocationResult
2862 AllocateRaw(int object_size, Executability executable);
2864 // Available bytes for objects in this space.
2865 inline intptr_t Available() override;
2867 intptr_t Size() override { return size_; }
2869 intptr_t SizeOfObjects() override { return objects_size_; }
2871 intptr_t MaximumCommittedMemory() { return maximum_committed_; }
2873 intptr_t CommittedMemory() override { return Size(); }
2875 // Approximate amount of physical memory committed for this space.
2876 size_t CommittedPhysicalMemory() override;
2878 int PageCount() { return page_count_; }
2880 // Finds an object for a given address, returns a Smi if it is not found.
2881 // The function iterates through all objects in this space, so it may be slow.
2882 Object* FindObject(Address a);
2884 // Finds a large object page containing the given address; returns NULL
2885 // if such a page doesn't exist.
2886 LargePage* FindPage(Address a);
2888 // Clears the marking state of live objects.
2889 void ClearMarkingStateOfLiveObjects();
2891 // Frees unmarked objects.
2892 void FreeUnmarkedObjects();
2894 // Checks whether a heap object is in this space; O(1).
2895 bool Contains(HeapObject* obj);
2896 bool Contains(Address address);
2898 // Checks whether the space is empty.
2899 bool IsEmpty() { return first_page_ == NULL; }
2901 LargePage* first_page() { return first_page_; }
2904 virtual void Verify();
2908 void Print() override;
2909 void ReportStatistics();
2910 void CollectCodeStatistics();
2912 // Checks whether an address is in the object area in this space. It
2913 // iterates over all objects in the space and may be slow.
2914 bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
2917 intptr_t maximum_committed_;
2918 // The head of the linked list of large object chunks.
2919 LargePage* first_page_;
2920 intptr_t size_; // allocated bytes
2921 int page_count_; // number of chunks
2922 intptr_t objects_size_; // size of objects
2923 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2926 friend class LargeObjectIterator;
2930 class LargeObjectIterator : public ObjectIterator {
2932 explicit LargeObjectIterator(LargeObjectSpace* space);
2936 // Implementation of the ObjectIterator functions.
2937 virtual HeapObject* next_object() { return Next(); }
2940 LargePage* current_;
2944 // Iterates over the chunks (pages and large object pages) that can contain
2945 // pointers to new space.
2946 class PointerChunkIterator BASE_EMBEDDED {
2948 inline explicit PointerChunkIterator(Heap* heap);
2950 // Return NULL when the iterator is done.
2951 inline MemoryChunk* next();
2954 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
2956 PageIterator old_iterator_;
2957 PageIterator map_iterator_;
2958 LargeObjectIterator lo_iterator_;
2963 struct CommentStatistic {
2964 const char* comment;
2972 // Must be small, since an iteration is used for lookup.
2973 static const int kMaxComments = 64;
2977 } // namespace v8::internal
2979 #endif // V8_HEAP_SPACES_H_