// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/utils.h"
// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
// A store-buffer based write barrier is used to keep track of intergenerational
// references. See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
// Some assertion macros used in debug mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
class MemoryAllocator;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}

  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  // Advances to the next bit, moving to the next cell when the mask overflows.
  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1);
    }
    return MarkBit(cell_, new_mask);
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

 private:
  CellType* cell_;
  CellType mask_;

  friend class Marking;
};
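// A minimal usage sketch (illustrative only, not part of this header):
// walking a run of mark bits with Next(), which advances the mask within a
// cell and moves to the next cell once the mask overflows.
//
//   MarkBit::CellType cells[2] = {0, 0};
//   MarkBit bit(&cells[0], 1);
//   for (int i = 0; i < 40; i++) {
//     bit.Set();         // sets bit i of the 64-bit run
//     bit = bit.Next();  // crosses from cells[0] into cells[1] after 32 bits
//   }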
// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);

  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() { return CellsForLength(kLength); }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) { return reinterpret_cast<Address>(this); }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask);
  }
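  // Worked example (a sketch, assuming 1MB pages, kPageSizeBits == 20, and
  // 64-bit pointers, kPointerSizeLog2 == 3): kLength = (1 << 20) >> 3 =
  // 131072 mark bits per page, stored in kSize = 131072 / 8 = 16384 bytes,
  // i.e. 4096 32-bit cells. An index i lands in cell i >> 5 under mask
  // 1 << (i & 31), which is exactly what MarkBitFromIndex computes.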
  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  // Prints runs of all-zero / all-one cells in compressed form.
  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }
      Flush();
      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }
      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }

  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
                                                ~kAlignmentMask);
  }

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
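  // Example (a sketch): because chunks are kAlignment (1MB) aligned, the
  // owning chunk of any interior pointer is recovered by masking off the low
  // bits of the address:
  //
  //   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  //   DCHECK(chunk->Contains(obj->address()));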
  Address address() { return reinterpret_cast<Address>(this); }

  MemoryChunk* next_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
  }

  MemoryChunk* prev_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
  }

  void set_next_chunk(MemoryChunk* next) {
    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
  }

  void set_prev_chunk(MemoryChunk* prev) {
    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
  }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() { return &reservation_; }

  void InitializeReservedMemory() { reservation_.Reset(); }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }
  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page. It's a
  // limit if it's in the page, or if it's just after the last byte of the
  // page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,
    NEVER_EVACUATE,  // May contain immortal immutables.
    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper;
    // otherwise marking bits are still intact.
    WAS_SWEPT,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // This flag is intended to be used for testing. Works only when both
    // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
    // are set. It forces the page to become an evacuation candidate at the
    // next candidate selection cycle.
    FORCE_EVACUATION_CANDIDATE_FOR_TESTING,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };
  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);

  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }

  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags"; the rest retain the current value.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
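  // Usage sketch (illustrative): copy only the new-space flags from another
  // chunk, leaving all other flags untouched. The mask name here is
  // hypothetical:
  //
  //   const intptr_t kNewSpaceFlagsMask =
  //       (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
  //   chunk->SetFlags(other->GetFlags(), kNewSpaceFlagsMask);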
  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
  // SWEEPING_PENDING - This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    SWEEPING_DONE,
    SWEEPING_FINALIZE,
    SWEEPING_IN_PROGRESS,
    SWEEPING_PENDING
  };

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

  // Atomically claims the page for sweeping; only one sweeper thread can win.
  bool TryParallelSweeping() {
    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
                                        SWEEPING_IN_PROGRESS) ==
           SWEEPING_PENDING;
  }

  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
  // Manage live byte count, i.e., the count of bytes known to be live because
  // they are marked black.
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
             live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }
  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    DCHECK(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar_;
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);
  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize =
      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset =
      kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
  size_t size() const { return size_; }

  void set_size(size_t size) { size_ = size; }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }

  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
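  // Round-trip sketch (illustrative): an address inside this chunk maps to a
  // mark bit index and back to the word-aligned address that bit covers.
  //
  //   uint32_t index = chunk->AddressToMarkbitIndex(addr);
  //   MarkBit bit = chunk->markbits()->MarkBitFromIndex(index);
  //   Address word = chunk->MarkbitIndexToAddress(index);  // addr rounded down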
  void InsertAfter(MemoryChunk* other);

  inline Heap* heap() const { return heap_; }

  static const int kFlagsOffset = kPointerSize;

  bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }

  void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }

  bool IsEvacuationCandidate() {
    DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
    return IsFlagSet(EVACUATION_CANDIDATE);
  }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() { return skip_list_; }

  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }

  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }

  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }

  void MarkEvacuationCandidate() {
    DCHECK(!IsFlagSet(NEVER_EVACUATE));
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() { return static_cast<int>(area_end() - area_start()); }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() { return high_water_mark_; }

  static inline void UpdateHighWaterMark(Address mark);
 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, count the
  // highest number of bytes ever allocated on the page.
  int high_water_mark_;

  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
  int available_in_small_free_list_;
  int available_in_medium_free_list_;
  int available_in_large_free_list_;
  int available_in_huge_free_list_;
  int non_available_small_blocks_;
  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                 Address area_start, Address area_end,
                                 Executability executable, Space* owner);

 private:
  // next_chunk_ holds a pointer of type MemoryChunk.
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk.
  base::AtomicWord prev_chunk_;

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also MemoryChunk::
  // FromAddress() and FromAnyAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }
  // ---------------------------------------------------------------------
  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
  void SetWasSwept() { SetFlag(WAS_SWEPT); }
  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
  void ResetFreeListStatistics();

  int LiveBytesFromFreeList() {
    return area_size() - non_available_small_blocks_ -
           available_in_small_free_list_ - available_in_medium_free_list_ -
           available_in_large_free_list_ - available_in_huge_free_list_;
  }

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

 private:
  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) { set_next_chunk(page); }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  // Return the total amount of memory committed for this space.
  virtual intptr_t CommittedMemory() = 0;

  // Approximate amount of physical memory committed for this space.
  virtual size_t CommittedPhysicalMemory() = 0;

  // Return the available bytes without growing.
  virtual intptr_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};
// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  size_t size() {
    DCHECK(valid());
    return code_range_->size();
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }
  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // return NULL.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

  void ReserveEmergencyBlock();
  void ReleaseEmergencyBlock();

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock() : start(0), size(0) {}
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };
  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // The emergency block guarantees that we can always allocate a page for
  // evacuation candidates when code space is compacted. The emergency block
  // is reserved immediately after GC and is released immediately before
  // allocating a page for evacuation.
  FreeBlock emergency_block_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
  void ReleaseBlock(const FreeBlock* block);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
// Maps the regions of a page to the first object that starts in them, so a
// scanner can begin at an object boundary instead of at the top of the page.
class SkipList {
 public:
  SkipList() { Clear(); }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }
    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
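// Usage sketch (illustrative): a scanner that lands on an arbitrary slot
// address can start at a recorded object boundary instead of at the top of
// the page:
//
//   SkipList* list = page->skip_list();
//   Address scan_start = list->StartFor(slot_address);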
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(intptr_t size, PagedSpace* owner,
                     Executability executable);

  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                               Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
  // Returns allocated space in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable space in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns the maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it can
  // be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable, Space* space);
  Address ReserveAlignedMemory(size_t requested, size_t alignment,
                               base::VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                base::VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space, AllocationAction action);

  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  static int PageAreaSize(AllocationSpace space) {
    DCHECK_NE(LO_SPACE, space);
    return (space == CODE_SPACE) ? CodePageAreaSize()
                                 : Page::kMaxRegularHeapObjectSize;
  }

  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start, size_t commit_size,
                                              size_t reserved_size);
 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {}
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which avoids virtual-function dispatch when iterating a specific
// space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() {}

  virtual HeapObject* next_object() = 0;
};
// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() { return Next(); }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner, Address start, Address end,
                         PageMode mode, HeapObjectCallback size_func);
};
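// Typical use (a sketch; assumes a PagedSpace* such as the heap's old space):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }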
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};
// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// the Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
                (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
                (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  INLINE(void set_limit(Address limit)) {
    SLOW_DCHECK(limit == NULL ||
                (reinterpret_cast<intptr_t>(limit) & kHeapObjectTagMask) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_DCHECK(limit_ == NULL ||
                (reinterpret_cast<intptr_t>(limit_) & kHeapObjectTagMask) ==
                    0);
    return limit_;
  }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the
// 'available' bytes are the unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
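// The balance invariant written out (illustrative, not a member of the class
// below): capacity == size + waste + available, so the available bytes can
// always be derived from the other three counters:
//
//   intptr_t Available() { return capacity_ - size_ - waste_; }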
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the
  // size) and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    DCHECK(size_in_bytes >= 0);
    waste_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};
// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() : top_(0), end_(NULL), available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeSpace* node, int size_in_bytes);

  FreeSpace* PickNodeFromList(int* node_size);
  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeSpace* top() const {
    return reinterpret_cast<FreeSpace*>(base::NoBarrier_Load(&top_));
  }

  void set_top(FreeSpace* top) {
    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
  }

  FreeSpace* end() const { return end_; }
  void set_end(FreeSpace* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  base::Mutex* mutex() { return &mutex_; }

  bool IsEmpty() { return top() == 0; }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // top_ points to the top FreeSpace* in the free list category.
  base::AtomicWord top_;
  FreeSpace* end_;
  base::Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};
// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
//
// The old space free list is organized in categories:
// 1-31 words: Such small free areas are discarded for efficiency reasons.
//   They can be reclaimed by the compactor. However the distance between top
//   and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
//   limit when the object we need to allocate is 1-31 words in size. These
//   spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top
//   and limit when the object we need to allocate is 32-255 words in size.
//   These spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
//   and limit when the object we need to allocate is 256-2047 words in size.
//   These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
//   Empty pages are added to this list. These spaces are called huge.
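// Illustrative mapping from a block size to its category (a sketch of the
// selection logic; CategoryFor is a hypothetical helper, not part of this
// class):
//
//   FreeListCategory* CategoryFor(int size_in_bytes) {
//     if (size_in_bytes <= kSmallListMax) return &small_list_;
//     if (size_in_bytes <= kMediumListMax) return &medium_list_;
//     if (size_in_bytes <= kLargeListMax) return &large_list_;
//     return &huge_list_;
//   }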
class FreeList {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // This method returns how much memory can be allocated after freeing
  // maximum_freed memory.
  static inline int GuaranteedAllocatable(int maximum_freed) {
    if (maximum_freed < kSmallListMin) {
      return 0;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }
#ifdef DEBUG
  intptr_t SumFreeLists();
#endif

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

  static const int kSmallListMin = 0x20 * kPointerSize;

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;

  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
class AllocationResult {
 public:
  // Implicit constructor from Object*.
  AllocationResult(Object* object)  // NOLINT
      : object_(object) {
    // AllocationResults can't return Smis, which are used to represent
    // failure and the space to retry in.
    CHECK(!object->IsSmi());
  }

  AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}

  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  inline bool IsRetry() { return object_->IsSmi(); }

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

  Object* ToObjectChecked() {
    CHECK(!IsRetry());
    return object_;
  }

  AllocationSpace RetrySpace() {
    DCHECK(IsRetry());
    return static_cast<AllocationSpace>(Smi::cast(object_)->value());
  }

 private:
  explicit AllocationResult(AllocationSpace space)
      : object_(Smi::FromInt(static_cast<int>(space))) {}

  Object* object_;
};

STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
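// Typical call-site pattern (a sketch): unwrap the result, or find out which
// space to collect before retrying.
//
//   AllocationResult allocation = space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!allocation.To(&object)) {
//     AllocationSpace retry_space = allocation.RetrySpace();
//     // ... trigger a GC in retry_space, then allocate again.
//   }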
class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }
  // Unlike the Contains() methods, it is safe to call this one even for
  // addresses of unmapped memory.
  bool ContainsSafe(Address addr);
  // Given an address occupied by a live object, return that object if it is
  // in this space, or a Smi if it is not. The implementation iterates over
  // objects in the page containing the address; the cost is linear in the
  // number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterDeserialization();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() override { return Capacity(); }

  // The maximum amount of memory ever committed for this space.
  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and
  // added to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
    ResetFreeListStatistics();
  }

  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() override { return free_list_.available(); }
  // Allocated bytes in this space. Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated! The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  intptr_t Size() override { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the
  // bytes in the current linear allocation area are not included.
  intptr_t SizeOfObjects() override;

  // Wasted bytes in this space. These are just the bytes that were thrown
  // away due to being too small to use for allocation. They do not include
  // the free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }
  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  // Allocate the requested number of bytes in the space and consider
  // allocation alignment if needed.
  MUST_USE_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted);
    return size_in_bytes - wasted;
  }

  void ResetFreeList() { free_list_.Reset(); }
  // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.set_top(top);
    allocation_info_.set_limit(limit);
  }

  // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo() {
    // Mark the old linear allocation area with a free space map so it can be
    // skipped when scanning the heap.
    int old_linear_size = static_cast<int>(limit() - top());
    Free(top(), old_linear_size);
    SetTopAndLimit(NULL, NULL);
  }

  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
1807 void IncreaseCapacity(int size);
1809 // Releases an unused page and shrinks the space.
1810 void ReleasePage(Page* page);
1812 // The dummy page that anchors the linked list of pages.
1813 Page* anchor() { return &anchor_; }
1816 // Verify integrity of this space.
1817 virtual void Verify(ObjectVisitor* visitor);
1819 // Overridden by subclasses to verify space-specific object
1820 // properties (e.g., only maps or free-list nodes are in map space).
1821 virtual void VerifyObject(HeapObject* obj) {}
1825 // Print meta info and objects in this space.
1826 void Print() override;
1828 // Reports statistics for the space.
1829 void ReportStatistics();
1831 // Reports code object related statistics.
1832 void CollectCodeStatistics();
1833 static void ReportCodeStatistics(Isolate* isolate);
1834 static void ResetCodeStatistics(Isolate* isolate);
1837 // Evacuation candidates are swept by the evacuator. This predicate needs to
1838 // return a valid result before _and_ after evacuation has finished.
1839 static bool ShouldBeSweptBySweeperThreads(Page* p) {
1840 return !p->IsEvacuationCandidate() &&
1841 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
1844 void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
1846 void IncreaseUnsweptFreeBytes(Page* p) {
1847 DCHECK(ShouldBeSweptBySweeperThreads(p));
1848 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1851 void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
1853 void DecreaseUnsweptFreeBytes(Page* p) {
1854 DCHECK(ShouldBeSweptBySweeperThreads(p));
1855 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1858 void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
1860 // This function tries to steal size_in_bytes of memory from the sweeper
1861 // threads' free lists. If it does not succeed in stealing enough memory, it
1862 // waits for the sweeper threads to finish sweeping.
1863 // It returns true when sweeping is completed and false otherwise.
1864 bool EnsureSweeperProgress(intptr_t size_in_bytes);
1866 void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
1868 Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
1870 Page* FirstPage() { return anchor_.next_page(); }
1871 Page* LastPage() { return anchor_.prev_page(); }
1873 void EvictEvacuationCandidatesFromFreeLists();
1877 // Returns the number of total pages in this space.
1878 int CountTotalPages();
1880 // Return size of allocatable area on a page in this space.
1881 inline int AreaSize() { return area_size_; }
1883 void CreateEmergencyMemory();
1884 void FreeEmergencyMemory();
1885 void UseEmergencyMemory();
1886 intptr_t MaxEmergencyMemoryAllocated();
1888 bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
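//
// Illustrative compaction-time protocol for the hooks above (a sketch of
// the intended sequence; TryEvacuatePage is a hypothetical stand-in for the
// collector's real evacuation step):
//
//   space->CreateEmergencyMemory();          // Reserve one page up front.
//   bool ok = TryEvacuatePage(space, page);
//   if (!ok && space->HasEmergencyMemory()) {
//     space->UseEmergencyMemory();           // Complete compaction with it.
//   } else if (space->HasEmergencyMemory()) {
//     space->FreeEmergencyMemory();          // Unused: give the page back.
//   }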
1891 FreeList* free_list() { return &free_list_; }
1895 // Maximum capacity of this space.
1896 intptr_t max_capacity_;
1898 // Accounting information for this space.
1899 AllocationStats accounting_stats_;
1901 // The dummy page that anchors the doubly linked list of pages.
1902 Page anchor_;
1904 // The space's free list.
1905 FreeList free_list_;
1907 // Normal allocation information.
1908 AllocationInfo allocation_info_;
1910 // The number of free bytes which could be reclaimed by advancing the
1911 // concurrent sweeper threads.
1912 intptr_t unswept_free_bytes_;
1914 // The sweeper threads iterate over the list of pointer and data space pages
1915 // and sweep these pages concurrently. They will stop sweeping after the
1916 // end_of_unswept_pages_ page.
1917 Page* end_of_unswept_pages_;
1919 // Emergency memory is the memory of a full page for a given space, allocated
1920 // conservatively before evacuating a page. If compaction fails due to an
1921 // out-of-memory error, the emergency memory can be used to complete
1922 // compaction. If not used, the emergency memory is released after compaction.
1923 MemoryChunk* emergency_memory_;
1925 // Expands the space by allocating a fixed number of pages. Returns false if
1926 // it cannot allocate the requested number of pages from the OS, or if the
1927 // hard heap size limit has been hit.
1928 bool Expand();
1930 // Generic fast case allocation function that tries linear allocation at the
1931 // address denoted by top in allocation_info_.
1932 inline HeapObject* AllocateLinearly(int size_in_bytes);
1934 // Generic fast case allocation function that tries aligned linear allocation
1935 // at the address denoted by top in allocation_info_. Writes the aligned
1936 // allocation size, which includes the filler size, to size_in_bytes.
1937 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
1938 AllocationAlignment alignment);
1940 // If sweeping is still in progress, try to sweep unswept pages. If that is
1941 // not successful, wait for the sweeper threads and retry free-list
1942 // allocation.
1943 MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
1944 int size_in_bytes);
1946 // Slow path of AllocateRaw. This function is space-dependent.
1947 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
1949 friend class PageIterator;
1950 friend class MarkCompactCollector;
1954 class NumberAndSizeInfo BASE_EMBEDDED {
1956 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1958 int number() const { return number_; }
1959 void increment_number(int num) { number_ += num; }
1961 int bytes() const { return bytes_; }
1962 void increment_bytes(int size) { bytes_ += size; }
1964 void clear() {
1965 number_ = 0;
1966 bytes_ = 0;
1967 }
1970 int number_;
1971 int bytes_;
1975 // HistogramInfo class for recording a single "bar" of a histogram. This
1976 // class is used for collecting statistics to print to the log file.
1977 class HistogramInfo : public NumberAndSizeInfo {
1979 HistogramInfo() : NumberAndSizeInfo() {}
1981 const char* name() { return name_; }
1982 void set_name(const char* name) { name_ = name; }
1985 const char* name_;
1989 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
1995 class NewSpacePage : public MemoryChunk {
1997 // GC related flags copied from from-space to to-space when
1998 // flipping semispaces.
1999 static const intptr_t kCopyOnFlipFlagsMask =
2000 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2001 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2002 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2004 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2006 inline NewSpacePage* next_page() const {
2007 return static_cast<NewSpacePage*>(next_chunk());
2010 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
2012 inline NewSpacePage* prev_page() const {
2013 return static_cast<NewSpacePage*>(prev_chunk());
2016 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
2018 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
2020 bool is_anchor() { return !this->InNewSpace(); }
2022 static bool IsAtStart(Address addr) {
2023 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
2024 kObjectStartOffset;
2025 }
2027 static bool IsAtEnd(Address addr) {
2028 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2031 Address address() { return reinterpret_cast<Address>(this); }
2033 // Finds the NewSpacePage containing the given address.
2034 static inline NewSpacePage* FromAddress(Address address_in_page) {
2035 Address page_start =
2036 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2037 ~Page::kPageAlignmentMask);
2038 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2039 return page;
2040 }
2042 // Find the page for a limit address. A limit address is either an address
2043 // inside a page, or the address right after the last byte of a page.
2044 static inline NewSpacePage* FromLimit(Address address_limit) {
2045 return NewSpacePage::FromAddress(address_limit - 1);
2048 // Checks if address1 and address2 are on the same new space page.
2049 static inline bool OnSamePage(Address address1, Address address2) {
2050 return NewSpacePage::FromAddress(address1) ==
2051 NewSpacePage::FromAddress(address2);
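//
// Worked example of the masking above, assuming 1 MB pages
// (kPageAlignmentMask == 0xFFFFF): FromAddress(0x2A34568) clears the low
// 20 bits and yields the page at 0x2A00000. FromLimit(0x2B00000) first
// backs up one byte, so a limit sitting exactly on a page boundary maps to
// the preceding page (0x2A00000), not the next one.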
2055 // Create a NewSpacePage object that is only used as anchor
2056 // for the doubly-linked list of real pages.
2057 explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
2059 static NewSpacePage* Initialize(Heap* heap, Address start,
2060 SemiSpace* semi_space);
2062 // Initialize a fake NewSpacePage used as a sentinel at the ends
2063 // of a doubly-linked list of real NewSpacePages.
2064 // Only uses the prev/next links, and sets flags to not be in new-space.
2065 void InitializeAsAnchor(SemiSpace* owner);
2067 friend class SemiSpace;
2068 friend class SemiSpaceIterator;
2072 // -----------------------------------------------------------------------------
2073 // SemiSpace in young generation
2075 // A semispace is a contiguous chunk of memory holding page-like memory
2076 // chunks. The mark-compact collector uses the memory of the first page in
2077 // the from space as a marking stack when tracing live objects.
2079 class SemiSpace : public Space {
2082 SemiSpace(Heap* heap, SemiSpaceId semispace)
2083 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2084 start_(NULL),
2085 age_mark_(NULL),
2086 id_(semispace),
2087 anchor_(this),
2088 current_page_(NULL) {}
2090 // Sets up the semispace using the given chunk.
2091 void SetUp(Address start, int initial_capacity, int target_capacity,
2092 int maximum_capacity);
2094 // Tear down the space. Heap memory was not allocated by the space, so it
2095 // is not deallocated here.
2096 void TearDown();
2098 // True if the space has been set up but not torn down.
2099 bool HasBeenSetUp() { return start_ != NULL; }
2101 // Grow the semispace to the new capacity. The new capacity
2102 // requested must be larger than the current capacity and less than
2103 // the maximum capacity.
2104 bool GrowTo(int new_capacity);
2106 // Shrinks the semispace to the new capacity. The new capacity
2107 // requested must be more than the amount of used memory in the
2108 // semispace and less than the current capacity.
2109 bool ShrinkTo(int new_capacity);
2111 // Sets the total capacity. Only possible when the space is not committed.
2112 bool SetTotalCapacity(int new_capacity);
2114 // Returns the start address of the first page of the space.
2115 Address space_start() {
2116 DCHECK(anchor_.next_page() != &anchor_);
2117 return anchor_.next_page()->area_start();
2120 // Returns the start address of the current page of the space.
2121 Address page_low() { return current_page_->area_start(); }
2123 // Returns one past the end address of the space.
2124 Address space_end() { return anchor_.prev_page()->area_end(); }
2126 // Returns one past the end address of the current page of the space.
2127 Address page_high() { return current_page_->area_end(); }
2129 bool AdvancePage() {
2130 NewSpacePage* next_page = current_page_->next_page();
2131 if (next_page == anchor()) return false;
2132 current_page_ = next_page;
2133 return true;
2134 }
2136 // Resets the space to using the first page.
2137 void Reset();
2139 // Age mark accessors.
2140 Address age_mark() { return age_mark_; }
2141 void set_age_mark(Address mark);
2143 // True if the address is in the address range of this semispace (not
2144 // necessarily below the allocation pointer).
2145 bool Contains(Address a) {
2146 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2147 reinterpret_cast<uintptr_t>(start_);
2150 // True if the object is a heap object in the address range of this
2151 // semispace (not necessarily below the allocation pointer).
2152 bool Contains(Object* o) {
2153 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
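//
// Both Contains() variants reduce to one AND and one compare. With an
// illustrative semispace at 0x40000000 spanning 4 MB, address_mask_ would
// be ~0x3FFFFF, so Contains(a) is simply
// (a & ~0x3FFFFF) == 0x40000000, with no page table walk.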
2156 // If we don't have these here, SemiSpace will be abstract. However,
2157 // they should never be called:
2159 intptr_t Size() override {
2160 UNREACHABLE();
2161 return 0;
2162 }
2164 intptr_t SizeOfObjects() override { return Size(); }
2166 intptr_t CommittedMemory() override {
2167 UNREACHABLE();
2168 return 0;
2169 }
2171 intptr_t Available() override {
2172 UNREACHABLE();
2173 return 0;
2174 }
2177 bool is_committed() { return committed_; }
2178 bool Commit();
2179 bool Uncommit();
2181 NewSpacePage* first_page() { return anchor_.next_page(); }
2182 NewSpacePage* current_page() { return current_page_; }
2184 #ifdef VERIFY_HEAP
2185 virtual void Verify();
2186 #endif
2188 #ifdef DEBUG
2189 void Print() override;
2190 // Validate a range of addresses in a SemiSpace.
2191 // The "from" address must be on a page prior to the "to" address,
2192 // in the linked page order, or it must be earlier on the same page.
2193 static void AssertValidRange(Address from, Address to);
2194 #else
2195 // Do nothing.
2196 inline static void AssertValidRange(Address from, Address to) {}
2197 #endif
2199 // Returns the current total capacity of the semispace.
2200 int TotalCapacity() { return total_capacity_; }
2202 // Returns the target for total capacity of the semispace.
2203 int TargetCapacity() { return target_capacity_; }
2205 // Returns the maximum total capacity of the semispace.
2206 int MaximumTotalCapacity() { return maximum_total_capacity_; }
2208 // Returns the initial capacity of the semispace.
2209 int InitialTotalCapacity() { return initial_total_capacity_; }
2211 SemiSpaceId id() { return id_; }
2213 static void Swap(SemiSpace* from, SemiSpace* to);
2215 // Returns the maximum amount of memory ever committed by the semi space.
2216 size_t MaximumCommittedMemory() { return maximum_committed_; }
2218 // Approximate amount of physical memory committed for this space.
2219 size_t CommittedPhysicalMemory() override;
2222 // Flips the semispace between being from-space and to-space.
2223 // Copies the flags into the masked positions on all pages in the space.
2224 void FlipPages(intptr_t flags, intptr_t flag_mask);
2226 // Updates Capacity and MaximumCommitted based on new capacity.
2227 void SetCapacity(int new_capacity);
2229 NewSpacePage* anchor() { return &anchor_; }
2231 // The current and maximum total capacity of the space.
2232 int total_capacity_;
2233 int target_capacity_;
2234 int maximum_total_capacity_;
2235 int initial_total_capacity_;
2237 intptr_t maximum_committed_;
2239 // The start address of the space.
2240 Address start_;
2241 // Used to govern object promotion during mark-compact collection.
2242 Address age_mark_;
2244 // Masks and comparison values to test for containment in this semispace.
2245 uintptr_t address_mask_;
2246 uintptr_t object_mask_;
2247 uintptr_t object_expected_;
2249 bool committed_;
2250 SemiSpaceId id_;
2252 NewSpacePage anchor_;
2253 NewSpacePage* current_page_;
2255 friend class SemiSpaceIterator;
2256 friend class NewSpacePageIterator;
2260 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2261 // semispace of the heap's new space. It iterates over the objects in the
2262 // semispace from a given start address (defaulting to the bottom of the
2263 // semispace) to the top of the semispace. New objects allocated after the
2264 // iterator is created are not iterated.
2265 class SemiSpaceIterator : public ObjectIterator {
2267 // Create an iterator over the objects in the given space. If no start
2268 // address is given, the iterator starts from the bottom of the space. If
2269 // no size function is given, the iterator calls Object::Size().
2271 // Iterate over all of allocated to-space.
2272 explicit SemiSpaceIterator(NewSpace* space);
2273 // Iterate over all of allocated to-space, with a custom size function.
2274 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2275 // Iterate over part of allocated to-space, from start to the end
2276 // of the space.
2277 SemiSpaceIterator(NewSpace* space, Address start);
2278 // Iterate from one address to another in the same semi-space.
2279 SemiSpaceIterator(Address from, Address to);
2281 HeapObject* Next() {
2282 if (current_ == limit_) return NULL;
2283 if (NewSpacePage::IsAtEnd(current_)) {
2284 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2285 page = page->next_page();
2286 DCHECK(!page->is_anchor());
2287 current_ = page->area_start();
2288 if (current_ == limit_) return NULL;
2289 }
2291 HeapObject* object = HeapObject::FromAddress(current_);
2292 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2294 current_ += size;
2295 return object;
2296 }
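//
// Illustrative use (assumes a Heap* heap with a set-up new space; objects
// allocated after the iterator is created are not visited):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Visit obj.
//   }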
2298 // Implementation of the ObjectIterator functions.
2299 virtual HeapObject* next_object() { return Next(); }
2302 void Initialize(Address start, Address end, HeapObjectCallback size_func);
2304 // The current iteration point.
2305 Address current_;
2306 // The end of iteration.
2307 Address limit_;
2308 // The callback function.
2309 HeapObjectCallback size_func_;
2313 // -----------------------------------------------------------------------------
2314 // A NewSpacePageIterator iterates the pages in a semi-space.
2315 class NewSpacePageIterator BASE_EMBEDDED {
2317 // Make an iterator that runs over all pages in to-space.
2318 explicit inline NewSpacePageIterator(NewSpace* space);
2320 // Make an iterator that runs over all pages in the given semispace,
2321 // even those not used in allocation.
2322 explicit inline NewSpacePageIterator(SemiSpace* space);
2324 // Make an iterator that iterates from the page containing start
2325 // to the page that contains limit in the same semispace.
2326 inline NewSpacePageIterator(Address start, Address limit);
2328 inline bool has_next();
2329 inline NewSpacePage* next();
2332 NewSpacePage* prev_page_; // Previous page returned.
2333 // Next page that will be returned. Cached here so that we can use this
2334 // iterator for operations that deallocate pages.
2335 NewSpacePage* next_page_;
2336 // Last page returned.
2337 NewSpacePage* last_page_;
2341 // -----------------------------------------------------------------------------
2342 // The young generation space.
2344 // The new space consists of a contiguous pair of semispaces. It simply
2345 // forwards most functions to the appropriate semispace.
2347 class NewSpace : public Space {
2350 explicit NewSpace(Heap* heap)
2351 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2352 to_space_(heap, kToSpace),
2353 from_space_(heap, kFromSpace),
2355 inline_allocation_limit_step_(0) {}
2357 // Sets up the new space using the given chunk.
2358 bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2360 // Tears down the space. Heap memory was not allocated by the space, so it
2361 // is not deallocated here.
2362 void TearDown();
2364 // True if the space has been set up but not torn down.
2365 bool HasBeenSetUp() {
2366 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2369 // Flip the pair of spaces.
2370 void Flip();
2372 // Grow the capacity of the semispaces. Assumes that they are not at
2373 // their maximum capacity.
2374 void Grow();
2376 // Grow the capacity of the semispaces by one page.
2377 bool GrowOnePage();
2379 // Shrink the capacity of the semispaces.
2380 void Shrink();
2382 // True if the address or object lies in the address range of either
2383 // semispace (not necessarily below the allocation pointer).
2384 bool Contains(Address a) {
2385 return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
2386 reinterpret_cast<uintptr_t>(start_);
2389 bool Contains(Object* o) {
2390 Address a = reinterpret_cast<Address>(o);
2391 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2394 // Return the allocated bytes in the active semispace.
2395 intptr_t Size() override {
2396 return pages_used_ * NewSpacePage::kAreaSize +
2397 static_cast<int>(top() - to_space_.page_low());
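//
// E.g., with three full pages in use and the bump pointer 64 KB into the
// current page, Size() is 3 * NewSpacePage::kAreaSize + 64 KB (figures
// illustrative; kAreaSize excludes each page's header).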
2400 // The same, but returning an int. We have to keep the intptr_t version
2401 // because it is inherited, but when we know we are dealing with the new
2402 // space, which can never grow as large as the other spaces, an int is useful:
2403 int SizeAsInt() { return static_cast<int>(Size()); }
2405 // Return the allocatable capacity of a semispace.
2406 intptr_t Capacity() {
2407 SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2408 return (to_space_.TotalCapacity() / Page::kPageSize) *
2409 NewSpacePage::kAreaSize;
2412 // Return the current size of a semispace, allocatable and non-allocatable
2413 // memory.
2414 intptr_t TotalCapacity() {
2415 DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
2416 return to_space_.TotalCapacity();
2419 // Return the total amount of memory committed for new space.
2420 intptr_t CommittedMemory() override {
2421 if (from_space_.is_committed()) return 2 * Capacity();
2422 return TotalCapacity();
2425 // Return the total amount of memory committed for new space.
2426 intptr_t MaximumCommittedMemory() {
2427 return to_space_.MaximumCommittedMemory() +
2428 from_space_.MaximumCommittedMemory();
2431 // Approximate amount of physical memory committed for this space.
2432 size_t CommittedPhysicalMemory() override;
2434 // Return the available bytes without growing.
2435 intptr_t Available() override { return Capacity() - Size(); }
2437 intptr_t PagesFromStart(Address addr) {
2438 return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
2441 size_t AllocatedSinceLastGC() {
2442 intptr_t allocated = top() - to_space_.age_mark();
2443 if (allocated < 0) {
2444 // Runtime has lowered the top below the age mark.
2445 return 0;
2446 }
2447 // Correctly account for non-allocatable regions at the beginning of
2448 // each page from the age_mark() to the top().
2449 intptr_t pages =
2450 PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
2451 allocated -= pages * (NewSpacePage::kObjectStartOffset);
2452 DCHECK(0 <= allocated && allocated <= Size());
2453 return static_cast<size_t>(allocated);
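//
// Worked example of the header correction above (illustrative): if the age
// mark is on page 0 and top() is on page 2, "pages" is 2, so
// 2 * NewSpacePage::kObjectStartOffset bytes of per-page header space are
// subtracted from the raw top() - age_mark() difference.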
2456 // Return the maximum capacity of a semispace.
2457 int MaximumCapacity() {
2458 DCHECK(to_space_.MaximumTotalCapacity() ==
2459 from_space_.MaximumTotalCapacity());
2460 return to_space_.MaximumTotalCapacity();
2463 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2465 // Returns the initial capacity of a semispace.
2466 int InitialTotalCapacity() {
2467 DCHECK(to_space_.InitialTotalCapacity() ==
2468 from_space_.InitialTotalCapacity());
2469 return to_space_.InitialTotalCapacity();
2472 // Return the address of the allocation pointer in the active semispace.
2473 Address top() {
2474 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2475 return allocation_info_.top();
2476 }
2478 void set_top(Address top) {
2479 DCHECK(to_space_.current_page()->ContainsLimit(top));
2480 allocation_info_.set_top(top);
2483 // Return the address of the allocation pointer limit in the active semispace.
2484 Address limit() {
2485 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2486 return allocation_info_.limit();
2487 }
2489 // Return the address of the first object in the active semispace.
2490 Address bottom() { return to_space_.space_start(); }
2492 // Get the age mark of the inactive semispace.
2493 Address age_mark() { return from_space_.age_mark(); }
2494 // Set the age mark in the active semispace.
2495 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2497 // The start address of the space and a bit mask. ANDing an address in the
2498 // new space with the mask will result in the start address.
2499 Address start() { return start_; }
2500 uintptr_t mask() { return address_mask_; }
2502 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2503 DCHECK(Contains(addr));
2504 DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
2505 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2506 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
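//
// E.g., on a 64-bit build (kPointerSizeLog2 == 3) an address 0x100 bytes
// past start_ maps to mark-bit index 0x100 >> 3 == 32: one mark bit per
// pointer-aligned word.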
2509 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2510 return reinterpret_cast<Address>(index << kPointerSizeLog2);
2513 // The allocation top and limit address.
2514 Address* allocation_top_address() { return allocation_info_.top_address(); }
2516 // The allocation limit address.
2517 Address* allocation_limit_address() {
2518 return allocation_info_.limit_address();
2521 MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2522 int size_in_bytes, AllocationAlignment alignment));
2524 MUST_USE_RESULT INLINE(
2525 AllocationResult AllocateRawUnaligned(int size_in_bytes));
2527 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2528 int size_in_bytes, AllocationAlignment alignment));
2530 // Reset the allocation pointer to the beginning of the active semispace.
2531 void ResetAllocationInfo();
2533 void UpdateInlineAllocationLimit(int size_in_bytes);
2534 void LowerInlineAllocationLimit(intptr_t step) {
2535 inline_allocation_limit_step_ = step;
2536 UpdateInlineAllocationLimit(0);
2537 top_on_previous_step_ = allocation_info_.top();
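//
// E.g., LowerInlineAllocationLimit(4 * KB) makes inlined allocation code
// fall back to the runtime roughly every 4 KB of new-space allocation,
// giving incremental marking a chance to perform a step (a sketch of the
// intent; the precise cadence is set by UpdateInlineAllocationLimit).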
2540 // Get the extent of the inactive semispace (for use as a marking stack,
2541 // or to zap it). Notice: space-addresses are not necessarily on the
2542 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2543 Address FromSpacePageLow() { return from_space_.page_low(); }
2544 Address FromSpacePageHigh() { return from_space_.page_high(); }
2545 Address FromSpaceStart() { return from_space_.space_start(); }
2546 Address FromSpaceEnd() { return from_space_.space_end(); }
2548 // Get the extent of the active semispace's pages' memory.
2549 Address ToSpaceStart() { return to_space_.space_start(); }
2550 Address ToSpaceEnd() { return to_space_.space_end(); }
2552 inline bool ToSpaceContains(Address address) {
2553 return to_space_.Contains(address);
2555 inline bool FromSpaceContains(Address address) {
2556 return from_space_.Contains(address);
2559 // True if the object is a heap object in the address range of the
2560 // respective semispace (not necessarily below the allocation pointer of the
2561 // space).
2562 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2563 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2565 // Try to switch the active semispace to a new, empty page.
2566 // Returns false if this isn't possible or reasonable (i.e., there
2567 // are no pages, or the current page is already empty), or true
2568 // if successful.
2569 bool AddFreshPage();
2572 // Verify the active semispace.
2573 virtual void Verify();
2577 // Print the active semispace.
2578 void Print() override { to_space_.Print(); }
2581 // Iterates the active semispace to collect statistics.
2582 void CollectStatistics();
2583 // Reports previously collected statistics of the active semispace.
2584 void ReportStatistics();
2585 // Clears previously collected statistics.
2586 void ClearHistograms();
2588 // Record the allocation or promotion of a heap object. Note that we don't
2589 // record every single allocation, but only those that happen in the
2590 // to space during a scavenge GC.
2591 void RecordAllocation(HeapObject* obj);
2592 void RecordPromotion(HeapObject* obj);
2594 // Return whether the operation succeeded.
2595 bool CommitFromSpaceIfNeeded() {
2596 if (from_space_.is_committed()) return true;
2597 return from_space_.Commit();
2600 bool UncommitFromSpace() {
2601 if (!from_space_.is_committed()) return true;
2602 return from_space_.Uncommit();
2605 bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2607 inline intptr_t inline_allocation_limit_step() {
2608 return inline_allocation_limit_step_;
2611 SemiSpace* active_space() { return &to_space_; }
2614 // Update allocation info to match the current to-space page.
2615 void UpdateAllocationInfo();
2617 Address chunk_base_;
2618 uintptr_t chunk_size_;
2621 SemiSpace to_space_;
2622 SemiSpace from_space_;
2623 base::VirtualMemory reservation_;
2626 // Start address and bit mask for containment testing.
2627 Address start_;
2628 uintptr_t address_mask_;
2629 uintptr_t object_mask_;
2630 uintptr_t object_expected_;
2632 // Allocation pointer and limit for normal allocation and allocation during
2633 // mark-compact collection.
2634 AllocationInfo allocation_info_;
2636 // When incremental marking is active we set allocation_info_.limit to be
2637 // lower than the actual limit and then gradually increase it in steps to
2638 // guarantee that we do incremental marking steps even when all allocation
2639 // is performed from inlined generated code.
2640 intptr_t inline_allocation_limit_step_;
2642 Address top_on_previous_step_;
2644 HistogramInfo* allocated_histogram_;
2645 HistogramInfo* promoted_histogram_;
2647 MUST_USE_RESULT AllocationResult
2648 SlowAllocateRaw(int size_in_bytes, AllocationAlignment alignment);
2650 friend class SemiSpaceIterator;
2654 // -----------------------------------------------------------------------------
2655 // Old object space (includes the old space of objects and code space)
2657 class OldSpace : public PagedSpace {
2659 // Creates an old space object with a given maximum capacity.
2660 // The constructor does not allocate pages from the OS.
2661 OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
2662 Executability executable)
2663 : PagedSpace(heap, max_capacity, id, executable) {}
2667 // For contiguous spaces, top should be in the space (or at the end) and limit
2668 // should be the end of the space.
2669 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2670 SLOW_DCHECK((space).page_low() <= (info).top() && \
2671 (info).top() <= (space).page_high() && \
2672 (info).limit() <= (space).page_high())
2675 // -----------------------------------------------------------------------------
2676 // Old space for all map objects
2678 class MapSpace : public PagedSpace {
2680 // Creates a map space object with a maximum capacity.
2681 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2682 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2683 max_map_space_pages_(kMaxMapPageIndex - 1) {}
2685 // Given an index, returns the page address.
2686 // TODO(1600): this limit is artificial just to keep code compilable
2687 static const int kMaxMapPageIndex = 1 << 16;
2689 virtual int RoundSizeDownToObjectAlignment(int size) {
2690 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2691 return RoundDown(size, Map::kSize);
2692 } else {
2693 return (size / Map::kSize) * Map::kSize;
2694 }
2695 }
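//
// Worked example (sizes illustrative): with a hypothetical Map::kSize of
// 128, RoundSizeDownToObjectAlignment(1000) == RoundDown(1000, 128) == 896;
// with a non-power-of-two size of 88, it is (1000 / 88) * 88 == 968.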
2698 virtual void VerifyObject(HeapObject* obj);
2701 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2703 // Do map space compaction if there is a page gap.
2704 int CompactionThreshold() {
2705 return kMapsPerPage * (max_map_space_pages_ - 1);
2708 const int max_map_space_pages_;
2712 // -----------------------------------------------------------------------------
2713 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2714 // the large object space. A large object is allocated from OS heap with
2715 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2716 // A large object always starts at Page::kObjectStartOffset into a page.
2717 // Large objects do not move during garbage collections.
2719 class LargeObjectSpace : public Space {
2721 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2722 virtual ~LargeObjectSpace() {}
2724 // Initializes internal data structures.
2727 // Releases internal resources, frees objects in this space.
2730 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2731 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2732 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
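//
// Worked example (constants illustrative): with a 1 MB Page::kPageSize,
// ObjectSizeFor applied to a 2 MB chunk leaves
// 2 MB - 1 MB - Page::kObjectStartOffset usable object bytes, while any
// chunk no larger than the padding itself yields 0.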
2735 // Allocates a large object of object_size bytes. The executability flag
2736 // determines whether the object's memory may hold code.
2737 MUST_USE_RESULT AllocationResult
2738 AllocateRaw(int object_size, Executability executable);
2740 bool CanAllocateSize(int size) { return Size() + size <= max_capacity_; }
2742 // Available bytes for objects in this space.
2743 inline intptr_t Available() override;
2745 intptr_t Size() override { return size_; }
2747 intptr_t SizeOfObjects() override { return objects_size_; }
2749 intptr_t MaximumCommittedMemory() { return maximum_committed_; }
2751 intptr_t CommittedMemory() override { return Size(); }
2753 // Approximate amount of physical memory committed for this space.
2754 size_t CommittedPhysicalMemory() override;
2756 int PageCount() { return page_count_; }
2758 // Finds an object for a given address; returns a Smi if it is not found.
2759 // The function iterates through all objects in this space and may be slow.
2760 Object* FindObject(Address a);
2762 // Finds a large object page containing the given address, returns NULL
2763 // if such a page doesn't exist.
2764 LargePage* FindPage(Address a);
2766 // Frees unmarked objects.
2767 void FreeUnmarkedObjects();
2769 // Checks whether a heap object is in this space; O(1).
2770 bool Contains(HeapObject* obj);
2772 // Checks whether the space is empty.
2773 bool IsEmpty() { return first_page_ == NULL; }
2775 LargePage* first_page() { return first_page_; }
2778 virtual void Verify();
2782 void Print() override;
2783 void ReportStatistics();
2784 void CollectCodeStatistics();
2786 // Checks whether an address is in the object area of this space. It
2787 // iterates over all objects in the space and may be slow.
2788 bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
2791 intptr_t max_capacity_;
2792 intptr_t maximum_committed_;
2793 // The head of the linked list of large object chunks.
2794 LargePage* first_page_;
2795 intptr_t size_; // allocated bytes
2796 int page_count_; // number of chunks
2797 intptr_t objects_size_; // size of objects
2798 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them.
2799 HashMap chunk_map_;
2801 friend class LargeObjectIterator;
2805 class LargeObjectIterator : public ObjectIterator {
2807 explicit LargeObjectIterator(LargeObjectSpace* space);
2808 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2810 HeapObject* Next();
2812 // Implementation of ObjectIterator.
2813 virtual HeapObject* next_object() { return Next(); }
2816 LargePage* current_;
2817 HeapObjectCallback size_func_;
2821 // Iterates over the chunks (pages and large object pages) that can contain
2822 // pointers to new space.
2823 class PointerChunkIterator BASE_EMBEDDED {
2825 inline explicit PointerChunkIterator(Heap* heap);
2827 // Return NULL when the iterator is done.
2828 MemoryChunk* next() {
2829 switch (state_) {
2830 case kOldSpaceState: {
2831 if (old_iterator_.has_next()) {
2832 return old_iterator_.next();
2833 }
2834 state_ = kMapState;
2835 break;
2836 }
2837 case kMapState: {
2838 if (map_iterator_.has_next()) {
2839 return map_iterator_.next();
2840 }
2841 state_ = kLargeObjectState;
2842 break;
2843 }
2844 case kLargeObjectState: {
2845 HeapObject* heap_object;
2846 do {
2847 heap_object = lo_iterator_.Next();
2848 if (heap_object == NULL) {
2849 state_ = kFinishedState;
2850 return NULL;
2851 }
2852 // Fixed arrays are the only pointer-containing objects in large
2853 // object space.
2854 } while (!heap_object->IsFixedArray());
2855 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2856 return answer;
2857 }
2858 case kFinishedState:
2859 return NULL;
2860 default:
2861 break;
2862 }
2863 UNREACHABLE();
2864 return NULL;
2865 }
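//
// Illustrative use (assumes a Heap* heap):
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // Visit each chunk that may contain pointers to new space.
//   }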
2869 enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
2870 State state_;
2871 PageIterator old_iterator_;
2872 PageIterator map_iterator_;
2873 LargeObjectIterator lo_iterator_;
2878 struct CommentStatistic {
2879 const char* comment;
2880 int size;
2881 int count;
2882 void Clear() {
2883 comment = NULL;
2884 size = 0;
2885 count = 0;
2886 }
2887 // Must be small, since an iteration is used for lookup.
2888 static const int kMaxComments = 64;
2892 } // namespace v8::internal
2894 #endif // V8_HEAP_SPACES_H_