// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "allocation.h"
#include "platform/mutex.h"
// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have
// a special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
class MemoryAllocator;
class AllocationInfo;
class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    }
    return MarkBit(cell_, new_mask, data_only_);
  }

  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
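
  // A minimal usage sketch (illustrative only; |bitmap| is assumed to be the
  // owning page's marking bitmap and |index| a mark-bit index on that page):
  //
  //   MarkBit mark_bit = bitmap->MarkBitFromIndex(index);
  //   if (!mark_bit.Get()) mark_bit.Set();  // mark as live
  //   MarkBit next_bit = mark_bit.Next();   // bit for the following index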
// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
      (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);

  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  static int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }
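
  // Worked example (illustrative): with kBitsPerCell == 32, index 70 lands in
  // cell 70 >> 5 == 2 with mask 1 << (70 & 31) == 1 << 6 -- the usual split
  // of a bit index into a word offset and an in-word bit position.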
  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }
  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }
      Flush();
      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }
      PrintF("%d: ", pos);
      PrintWord(cell, 0);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };
  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};


// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
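
  // Sketch of the alignment arithmetic (illustrative): chunks are 1MB
  // aligned, so kAlignmentMask == 0xFFFFF and an interior pointer such as
  // 0x4231ABCD is masked down to the chunk header at 0x42300000.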
  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const { return next_chunk_; }
  MemoryChunk* prev_chunk() const { return prev_chunk_; }

  void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; }
  void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }

  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }
  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;
  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects.
    // Pages swept conservatively cannot be iterated over. Both flags indicate
    // that marking bits have been cleared by the sweeper; otherwise marking
    // bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white-to-grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };
  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);
  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask are set to
  // the value in "flags"; the rest retain the current value in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
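
  // Usage sketch (illustrative): the flag accessors are plain bit operations
  // on flags_. For instance, to set SCAN_ON_SCAVENGE and clear
  // EVACUATION_CANDIDATE in a single call:
  //
  //   chunk->SetFlags(1 << SCAN_ON_SCAVENGE,
  //                   (1 << SCAN_ON_SCAVENGE) | (1 << EVACUATION_CANDIDATE));
  //   ASSERT(chunk->IsFlagSet(SCAN_ON_SCAVENGE));
  //   ASSERT(!chunk->IsFlagSet(EVACUATION_CANDIDATE));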
  intptr_t parallel_sweeping() const {
    return parallel_sweeping_;
  }

  void set_parallel_sweeping(intptr_t state) {
    parallel_sweeping_ = state;
  }

  bool TryParallelSweeping() {
    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
  }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }
  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    ASSERT(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);
  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = kPointerSize + kPointerSize;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;
  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
                                    kIntSize + kIntSize + kPointerSize +
                                    5 * kPointerSize;
  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32-bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
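
  // The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment (128 bytes on 32-bit targets, where kPointerSize
  // is 4). E.g. a hypothetical kBodyOffset of 544 gives
  // 543 + (128 - 543 % 128) == 640, the next 128-byte boundary; an already
  // aligned value maps to itself.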
  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }
  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
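
  // Round-trip sketch (illustrative), for a pointer-aligned addr on this
  // chunk:
  //
  //   uint32_t index = chunk->AddressToMarkbitIndex(addr);
  //   MarkBit bit = chunk->markbits()->MarkBitFromIndex(index);
  //   ASSERT(chunk->MarkbitIndexToAddress(index) == addr);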
  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize * 3;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() {
    return high_water_mark_;
  }

  static inline void UpdateHighWaterMark(Address mark);
 protected:
  MemoryChunk* next_chunk_;
  MemoryChunk* prev_chunk_;
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from
  // any entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark
  // scan-on-scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, count the
  // highest number of bytes ever allocated on the page.
  int high_water_mark_;

  intptr_t parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  // This only works if the object is in fact in a page. See also
  // MemoryChunk::FromAddress() and FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }
  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into
  // account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; }                 \
  void set_##name(type name) { name##_ = name; }  \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS
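
  // For instance, FRAGMENTATION_STATS_ACCESSORS(intptr_t,
  // available_in_small_free_list) expands to a getter returning
  // available_in_small_free_list_, a set_available_in_small_free_list()
  // setter, and an add_available_in_small_free_list() accumulator over the
  // corresponding MemoryChunk field.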
  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  virtual void Print() = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return this != NULL && code_range_ != NULL; }
  Address start() {
    if (this == NULL || code_range_ == NULL) return NULL;
    return static_cast<Address>(code_range_->address());
  }
  bool contains(Address address) {
    if (this == NULL || code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // return NULL.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
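
// Worked example (illustrative): with kRegionSizeLog2 == 13 each region
// covers 8KB of the page, so a page offset of 0x5123 falls in region
// 0x5123 >> 13 == 2, and starts_[2] records the lowest object start
// intersecting that region.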


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(
      intptr_t size, PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(
      intptr_t object_size, Space* owner, Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }
  // Reports statistic info of the space.
  void ReportStatistics();

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it
  // can be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable,
                             Space* owner);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size,
                                size_t commit_size,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size);
 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid the overhead of virtual functions when
// iterating over a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.
//
// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {
  }

  INLINE(void set_top(Address top)) {
    SLOW_ASSERT(top == NULL ||
        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_ASSERT(top_ == NULL ||
        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  Address* top_address() {
    return &top_;
  }

  INLINE(void set_limit(Address limit)) {
    SLOW_ASSERT(limit == NULL ||
        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_ASSERT(limit_ == NULL ||
        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
    return limit_;
  }

  Address* limit_address() {
    return &limit_;
  }

  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
        && (top_ <= limit_);
  }
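
  // A bump-pointer allocation against this pair looks roughly like the
  // following (hedged sketch; the real fast path is
  // PagedSpace::AllocateLinearly):
  //
  //   Address current_top = info->top();
  //   Address new_top = current_top + size_in_bytes;
  //   if (new_top > info->limit()) return NULL;  // take the slow path
  //   info->set_top(new_top);
  //   return HeapObject::FromAddress(current_top);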
 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the
// 'available' bytes are the unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the
  // size) and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }
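
  // Taken together these mutators preserve the invariant described above:
  // capacity == size + waste + available. E.g. ExpandSpace(8192) followed by
  // DeallocateBytes(8192) leaves capacity_ up by 8192 with size_ unchanged,
  // i.e. 8192 bytes have become available.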
 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a
  // map pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // routines.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  static inline FreeListNode* cast(MaybeObject* maybe) {
    ASSERT(!maybe->IsFailure());
    return reinterpret_cast<FreeListNode*>(maybe);
  }

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() :
      top_(NULL),
      end_(NULL),
      available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Free(FreeListNode* node, int size_in_bytes);

  FreeListNode* PickNodeFromList(int* node_size);
  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeListNode** GetTopAddress() { return &top_; }
  FreeListNode* top() const { return top_; }
  void set_top(FreeListNode* top) { top_ = top; }

  FreeListNode** GetEndAddress() { return &end_; }
  FreeListNode* end() const { return end_; }
  void set_end(FreeListNode* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  Mutex* mutex() { return &mutex_; }

  bool IsEmpty() {
    return top_ == NULL;
  }

  intptr_t SumFreeList();
  int FreeListLength();

 private:
  FreeListNode* top_;
  FreeListNode* end_;
  Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};


// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
//
// The old space free list is organized in categories.
// 1-31 words: Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor. However the distance between
//     top and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 1-31 words in size. These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 32-255 words in size.
//     These spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 256-2047 words in
//     size. These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
//     Empty pages are added to this list. These spaces are called huge.
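//
// For example, an allocation request of 100 words falls in the 32-255 word
// band and is served from the medium list (free blocks of 256-2047 words),
// while freeing a 5000-word block places it on the large list (2048-16383
// words). The byte constants kSmallListMin/kSmallListMax etc. below encode
// these boundaries.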
class FreeList {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // number of bytes lost to fragmentation is returned in the output
  // parameter 'wasted_bytes'. The size should be a non-zero multiple of the
  // word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }

  intptr_t SumFreeLists();

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;

  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, so the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterBoot();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // The maximum amount of memory ever committed for this space.
  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }
    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and
  // added to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
    ResetFreeListStatistics();
  }
  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() { return free_list_.available(); }

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // lazy sweeping are counted as being allocated! The bytes in the current
  // linear allocation area (between top and limit) are also counted here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the
  // bytes in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects();

  // Wasted bytes in this space. These are just the bytes that were thrown
  // away due to being too small to use for allocation. They do not include
  // the free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
    return size_in_bytes - wasted;
  }

  void ResetFreeList() {
    free_list_.Reset();
  }

  // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
    ASSERT(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.set_top(top);
    allocation_info_.set_limit(limit);
  }

  // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo() {
    // Mark the old linear allocation area with a free space map so it can be
    // skipped when scanning the heap.
    int old_linear_size = static_cast<int>(limit() - top());
    Free(top(), old_linear_size);
    SetTopAndLimit(NULL, NULL);
  }
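
  // For example (illustrative), if top is 0x2a000400 and limit is 0x2a000800,
  // EmptyAllocationInfo() hands the 0x400 leftover bytes to Free(), which
  // turns them into a free-list node (or records them as waste), and the
  // space has no linear allocation area until a new one is set up.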
1810 void Allocate(int bytes) {
1811 accounting_stats_.AllocateBytes(bytes);
1814 void IncreaseCapacity(int size);
1816 // Releases an unused page and shrinks the space.
1817 void ReleasePage(Page* page, bool unlink);
1819 // The dummy page that anchors the linked list of pages.
1820 Page* anchor() { return &anchor_; }
1823 // Verify integrity of this space.
1824 virtual void Verify(ObjectVisitor* visitor);
1826 // Overridden by subclasses to verify space-specific object
1827 // properties (e.g., only maps or free-list nodes are in map space).
1828 virtual void VerifyObject(HeapObject* obj) {}
1832 // Print meta info and objects in this space.
1833 virtual void Print();
1835 // Reports statistics for the space
1836 void ReportStatistics();
1838 // Report code object related statistics
1839 void CollectCodeStatistics();
1840 static void ReportCodeStatistics(Isolate* isolate);
1841 static void ResetCodeStatistics(Isolate* isolate);
1844 bool was_swept_conservatively() { return was_swept_conservatively_; }
1845 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
1847 // Evacuation candidates are swept by evacuator. Needs to return a valid
1848 // result before _and_ after evacuation has finished.
1849 static bool ShouldBeSweptLazily(Page* p) {
1850 return !p->IsEvacuationCandidate() &&
1851 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1852 !p->WasSweptPrecisely();
1855 void SetPagesToSweep(Page* first) {
1856 ASSERT(unswept_free_bytes_ == 0);
1857 if (first == &anchor_) first = NULL;
1858 first_unswept_page_ = first;
1861 void IncrementUnsweptFreeBytes(intptr_t by) {
1862 unswept_free_bytes_ += by;
1865 void IncreaseUnsweptFreeBytes(Page* p) {
1866 ASSERT(ShouldBeSweptLazily(p));
1867 unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1870 void DecrementUnsweptFreeBytes(intptr_t by) {
1871 unswept_free_bytes_ -= by;
1874 void DecreaseUnsweptFreeBytes(Page* p) {
1875 ASSERT(ShouldBeSweptLazily(p));
1876 unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1879 void ResetUnsweptFreeBytes() {
1880 unswept_free_bytes_ = 0;
1883 bool AdvanceSweeper(intptr_t bytes_to_sweep);
// If parallel sweeper threads are active and the main thread has finished
// its sweeping phase, this function waits for them to complete; otherwise
// AdvanceSweeper is called with size_in_bytes.
1888 bool EnsureSweeperProgress(intptr_t size_in_bytes);
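// Illustrative sketch of a caller (hypothetical; the real slow-path
// allocation code decides between sweeping further and expanding the
// space):
//
//   if (!EnsureSweeperProgress(size_in_bytes)) {
//     // Sweeping could not reclaim enough memory; consider expanding.
//   }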
1890 bool IsLazySweepingComplete() {
1891 return !first_unswept_page_->is_valid();
1894 Page* FirstPage() { return anchor_.next_page(); }
1895 Page* LastPage() { return anchor_.prev_page(); }
1897 void EvictEvacuationCandidatesFromFreeLists();
// Returns the total number of pages in this space.
1902 int CountTotalPages();
1904 // Return size of allocatable area on a page in this space.
1905 inline int AreaSize() {
1910 FreeList* free_list() { return &free_list_; }
1914 // Maximum capacity of this space.
1915 intptr_t max_capacity_;
1917 intptr_t SizeOfFirstPage();
1919 // Accounting information for this space.
1920 AllocationStats accounting_stats_;
// The dummy page that anchors the doubly linked list of pages.
1925 // The space's free list.
1926 FreeList free_list_;
1928 // Normal allocation information.
1929 AllocationInfo allocation_info_;
1931 bool was_swept_conservatively_;
1933 // The first page to be swept when the lazy sweeper advances. Is set
1934 // to NULL when all pages have been swept.
1935 Page* first_unswept_page_;
1937 // The number of free bytes which could be reclaimed by advancing the
1938 // lazy sweeper. This is only an estimation because lazy sweeping is
1939 // done conservatively.
1940 intptr_t unswept_free_bytes_;
// Expands the space by allocating a fixed number of pages. Returns false
// if it cannot allocate the requested number of pages from the OS, or if
// the hard heap size limit has been hit.
1947 // Generic fast case allocation function that tries linear allocation at the
1948 // address denoted by top in allocation_info_.
1949 inline HeapObject* AllocateLinearly(int size_in_bytes);
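// A minimal sketch of what linear allocation does (the real definition
// lives in spaces-inl.h):
//
//   HeapObject* AllocateLinearly(int size_in_bytes) {
//     Address current_top = allocation_info_.top();
//     Address new_top = current_top + size_in_bytes;
//     if (new_top > allocation_info_.limit()) return NULL;  // Slow path.
//     allocation_info_.set_top(new_top);
//     return HeapObject::FromAddress(current_top);
//   }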
1951 // Slow path of AllocateRaw. This function is space-dependent.
1952 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
1954 friend class PageIterator;
1955 friend class MarkCompactCollector;
1959 class NumberAndSizeInfo BASE_EMBEDDED {
1961 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1963 int number() const { return number_; }
1964 void increment_number(int num) { number_ += num; }
1966 int bytes() const { return bytes_; }
1967 void increment_bytes(int size) { bytes_ += size; }
1980 // HistogramInfo class for recording a single "bar" of a histogram. This
1981 // class is used for collecting statistics to print to the log file.
1982 class HistogramInfo: public NumberAndSizeInfo {
1984 HistogramInfo() : NumberAndSizeInfo() {}
1986 const char* name() { return name_; }
1987 void set_name(const char* name) { name_ = name; }
2003 class NewSpacePage : public MemoryChunk {
2005 // GC related flags copied from from-space to to-space when
2006 // flipping semispaces.
2007 static const intptr_t kCopyOnFlipFlagsMask =
2008 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2009 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2010 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2012 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2014 inline NewSpacePage* next_page() const {
2015 return static_cast<NewSpacePage*>(next_chunk());
2018 inline void set_next_page(NewSpacePage* page) {
2019 set_next_chunk(page);
2022 inline NewSpacePage* prev_page() const {
2023 return static_cast<NewSpacePage*>(prev_chunk());
2026 inline void set_prev_page(NewSpacePage* page) {
2027 set_prev_chunk(page);
2030 SemiSpace* semi_space() {
2031 return reinterpret_cast<SemiSpace*>(owner());
2034 bool is_anchor() { return !this->InNewSpace(); }
2036 static bool IsAtStart(Address addr) {
2037 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
2038 == kObjectStartOffset;
2041 static bool IsAtEnd(Address addr) {
2042 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
Address address() { return reinterpret_cast<Address>(this); }
// Finds the NewSpacePage containing the given address.
2050 static inline NewSpacePage* FromAddress(Address address_in_page) {
2051 Address page_start =
2052 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2053 ~Page::kPageAlignmentMask);
2054 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2058 // Find the page for a limit address. A limit address is either an address
2059 // inside a page, or the address right after the last byte of a page.
2060 static inline NewSpacePage* FromLimit(Address address_limit) {
2061 return NewSpacePage::FromAddress(address_limit - 1);
2064 // Checks if address1 and address2 are on the same new space page.
2065 static inline bool OnSamePage(Address address1, Address address2) {
2066 return NewSpacePage::FromAddress(address1) ==
2067 NewSpacePage::FromAddress(address2);
2071 // Create a NewSpacePage object that is only used as anchor
2072 // for the doubly-linked list of real pages.
2073 explicit NewSpacePage(SemiSpace* owner) {
2074 InitializeAsAnchor(owner);
2077 static NewSpacePage* Initialize(Heap* heap,
2079 SemiSpace* semi_space);
// Initialize a fake NewSpacePage used as a sentinel at the ends of a
// doubly-linked list of real NewSpacePages.
// Only uses the prev/next links, and sets flags to not be in new-space.
2084 void InitializeAsAnchor(SemiSpace* owner);
2086 friend class SemiSpace;
2087 friend class SemiSpaceIterator;
2091 // -----------------------------------------------------------------------------
2092 // SemiSpace in young generation
2094 // A semispace is a contiguous chunk of memory holding page-like memory
2095 // chunks. The mark-compact collector uses the memory of the first page in
2096 // the from space as a marking stack when tracing live objects.
2098 class SemiSpace : public Space {
2101 SemiSpace(Heap* heap, SemiSpaceId semispace)
2102 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2107 current_page_(NULL) { }
2109 // Sets up the semispace using the given chunk.
2110 void SetUp(Address start, int initial_capacity, int maximum_capacity);
2112 // Tear down the space. Heap memory was not allocated by the space, so it
2113 // is not deallocated here.
2116 // True if the space has been set up but not torn down.
2117 bool HasBeenSetUp() { return start_ != NULL; }
2119 // Grow the semispace to the new capacity. The new capacity
2120 // requested must be larger than the current capacity and less than
2121 // the maximum capacity.
2122 bool GrowTo(int new_capacity);
2124 // Shrinks the semispace to the new capacity. The new capacity
2125 // requested must be more than the amount of used memory in the
2126 // semispace and less than the current capacity.
2127 bool ShrinkTo(int new_capacity);
2129 // Returns the start address of the first page of the space.
2130 Address space_start() {
2131 ASSERT(anchor_.next_page() != &anchor_);
2132 return anchor_.next_page()->area_start();
2135 // Returns the start address of the current page of the space.
2136 Address page_low() {
2137 return current_page_->area_start();
2140 // Returns one past the end address of the space.
2141 Address space_end() {
2142 return anchor_.prev_page()->area_end();
2145 // Returns one past the end address of the current page of the space.
2146 Address page_high() {
2147 return current_page_->area_end();
2150 bool AdvancePage() {
2151 NewSpacePage* next_page = current_page_->next_page();
2152 if (next_page == anchor()) return false;
2153 current_page_ = next_page;
2157 // Resets the space to using the first page.
2160 // Age mark accessors.
2161 Address age_mark() { return age_mark_; }
2162 void set_age_mark(Address mark);
2164 // True if the address is in the address range of this semispace (not
2165 // necessarily below the allocation pointer).
2166 bool Contains(Address a) {
2167 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2168 == reinterpret_cast<uintptr_t>(start_);
2171 // True if the object is a heap object in the address range of this
2172 // semispace (not necessarily below the allocation pointer).
2173 bool Contains(Object* o) {
2174 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
// If we don't have these here then SemiSpace will be abstract. However,
// they should never be called.
2179 virtual intptr_t Size() {
2184 bool is_committed() { return committed_; }
2188 NewSpacePage* first_page() { return anchor_.next_page(); }
2189 NewSpacePage* current_page() { return current_page_; }
2192 virtual void Verify();
2196 virtual void Print();
// Validate a range of addresses in a SemiSpace.
2198 // The "from" address must be on a page prior to the "to" address,
2199 // in the linked page order, or it must be earlier on the same page.
2200 static void AssertValidRange(Address from, Address to);
2203 inline static void AssertValidRange(Address from, Address to) {}
2206 // Returns the current capacity of the semi space.
2207 int Capacity() { return capacity_; }
2209 // Returns the maximum capacity of the semi space.
2210 int MaximumCapacity() { return maximum_capacity_; }
2212 // Returns the initial capacity of the semi space.
2213 int InitialCapacity() { return initial_capacity_; }
2215 SemiSpaceId id() { return id_; }
2217 static void Swap(SemiSpace* from, SemiSpace* to);
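// For example, the scavenger exchanges the roles of the two semispaces
// of the new space when it starts a collection (sketch; see
// NewSpace::Flip):
//
//   SemiSpace::Swap(&from_space_, &to_space_);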
2219 // Returns the maximum amount of memory ever committed by the semi space.
2220 size_t MaximumCommittedMemory() { return maximum_committed_; }
2222 // Approximate amount of physical memory committed for this space.
2223 size_t CommittedPhysicalMemory();
2226 // Flips the semispace between being from-space and to-space.
2227 // Copies the flags into the masked positions on all pages in the space.
2228 void FlipPages(intptr_t flags, intptr_t flag_mask);
2230 // Updates Capacity and MaximumCommitted based on new capacity.
2231 void SetCapacity(int new_capacity);
2233 NewSpacePage* anchor() { return &anchor_; }
2235 // The current and maximum capacity of the space.
2237 int maximum_capacity_;
2238 int initial_capacity_;
2240 intptr_t maximum_committed_;
2242 // The start address of the space.
2244 // Used to govern object promotion during mark-compact collection.
2247 // Masks and comparison values to test for containment in this semispace.
2248 uintptr_t address_mask_;
2249 uintptr_t object_mask_;
2250 uintptr_t object_expected_;
2255 NewSpacePage anchor_;
2256 NewSpacePage* current_page_;
2258 friend class SemiSpaceIterator;
2259 friend class NewSpacePageIterator;
2261 TRACK_MEMORY("SemiSpace")
2265 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2266 // semispace of the heap's new space. It iterates over the objects in the
2267 // semispace from a given start address (defaulting to the bottom of the
2268 // semispace) to the top of the semispace. New objects allocated after the
2269 // iterator is created are not iterated.
2270 class SemiSpaceIterator : public ObjectIterator {
2272 // Create an iterator over the objects in the given space. If no start
2273 // address is given, the iterator starts from the bottom of the space. If
2274 // no size function is given, the iterator calls Object::Size().
2276 // Iterate over all of allocated to-space.
2277 explicit SemiSpaceIterator(NewSpace* space);
// Iterate over all of allocated to-space, with a custom size function.
2279 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
// Iterate over part of allocated to-space, from start to the end of
// allocation.
2282 SemiSpaceIterator(NewSpace* space, Address start);
2283 // Iterate from one address to another in the same semi-space.
2284 SemiSpaceIterator(Address from, Address to);
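// Typical use (illustrative): walk all objects allocated in to-space.
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL;
//        object = it.Next()) {
//     // ... visit object ...
//   }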
2286 HeapObject* Next() {
2287 if (current_ == limit_) return NULL;
2288 if (NewSpacePage::IsAtEnd(current_)) {
2289 NewSpacePage* page = NewSpacePage::FromLimit(current_);
2290 page = page->next_page();
2291 ASSERT(!page->is_anchor());
2292 current_ = page->area_start();
2293 if (current_ == limit_) return NULL;
2296 HeapObject* object = HeapObject::FromAddress(current_);
2297 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2303 // Implementation of the ObjectIterator functions.
2304 virtual HeapObject* next_object() { return Next(); }
2307 void Initialize(Address start,
2309 HeapObjectCallback size_func);
2311 // The current iteration point.
2313 // The end of iteration.
2315 // The callback function.
2316 HeapObjectCallback size_func_;
2320 // -----------------------------------------------------------------------------
2321 // A PageIterator iterates the pages in a semi-space.
2322 class NewSpacePageIterator BASE_EMBEDDED {
2324 // Make an iterator that runs over all pages in to-space.
2325 explicit inline NewSpacePageIterator(NewSpace* space);
2327 // Make an iterator that runs over all pages in the given semispace,
2328 // even those not used in allocation.
2329 explicit inline NewSpacePageIterator(SemiSpace* space);
// Make an iterator that iterates from the page containing start to the
// page that contains limit in the same semispace.
2333 inline NewSpacePageIterator(Address start, Address limit);
2335 inline bool has_next();
2336 inline NewSpacePage* next();
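// Typical use (illustrative): visit every page of a semispace.
//
//   NewSpacePageIterator it(space);
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // ... process page ...
//   }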
2339 NewSpacePage* prev_page_; // Previous page returned.
2340 // Next page that will be returned. Cached here so that we can use this
2341 // iterator for operations that deallocate pages.
2342 NewSpacePage* next_page_;
2343 // Last page returned.
2344 NewSpacePage* last_page_;
2348 // -----------------------------------------------------------------------------
2349 // The young generation space.
2351 // The new space consists of a contiguous pair of semispaces. It simply
2352 // forwards most functions to the appropriate semispace.
2354 class NewSpace : public Space {
2357 explicit NewSpace(Heap* heap)
2358 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2359 to_space_(heap, kToSpace),
2360 from_space_(heap, kFromSpace),
2362 inline_allocation_limit_step_(0) {}
2364 // Sets up the new space using the given chunk.
2365 bool SetUp(int reserved_semispace_size_, int max_semispace_size);
2367 // Tears down the space. Heap memory was not allocated by the space, so it
2368 // is not deallocated here.
2371 // True if the space has been set up but not torn down.
2372 bool HasBeenSetUp() {
2373 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2376 // Flip the pair of spaces.
2379 // Grow the capacity of the semispaces. Assumes that they are not at
2380 // their maximum capacity.
2383 // Shrink the capacity of the semispaces.
2386 // True if the address or object lies in the address range of either
2387 // semispace (not necessarily below the allocation pointer).
2388 bool Contains(Address a) {
2389 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2390 == reinterpret_cast<uintptr_t>(start_);
2393 bool Contains(Object* o) {
2394 Address a = reinterpret_cast<Address>(o);
2395 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2398 // Return the allocated bytes in the active semispace.
2399 virtual intptr_t Size() {
2400 return pages_used_ * NewSpacePage::kAreaSize +
2401 static_cast<int>(top() - to_space_.page_low());
// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but if we know we are dealing with the
// new space, which can't get as big as the other spaces, then this is
// useful:
2407 int SizeAsInt() { return static_cast<int>(Size()); }
// Return the usable capacity of a semispace (the page area, excluding
// page headers).
2410 intptr_t EffectiveCapacity() {
2411 SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2412 return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2415 // Return the current capacity of a semispace.
2416 intptr_t Capacity() {
2417 ASSERT(to_space_.Capacity() == from_space_.Capacity());
2418 return to_space_.Capacity();
2421 // Return the total amount of memory committed for new space.
2422 intptr_t CommittedMemory() {
2423 if (from_space_.is_committed()) return 2 * Capacity();
// Return the maximum amount of memory ever committed for new space.
2428 intptr_t MaximumCommittedMemory() {
2429 return to_space_.MaximumCommittedMemory() +
2430 from_space_.MaximumCommittedMemory();
2433 // Approximate amount of physical memory committed for this space.
2434 size_t CommittedPhysicalMemory();
2436 // Return the available bytes without growing.
2437 intptr_t Available() {
2438 return Capacity() - Size();
2441 // Return the maximum capacity of a semispace.
2442 int MaximumCapacity() {
2443 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2444 return to_space_.MaximumCapacity();
2447 // Returns the initial capacity of a semispace.
2448 int InitialCapacity() {
2449 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2450 return to_space_.InitialCapacity();
2453 // Return the address of the allocation pointer in the active semispace.
2455 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2456 return allocation_info_.top();
2459 void set_top(Address top) {
2460 ASSERT(to_space_.current_page()->ContainsLimit(top));
2461 allocation_info_.set_top(top);
2464 // Return the address of the allocation pointer limit in the active semispace.
2466 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2467 return allocation_info_.limit();
2470 // Return the address of the first object in the active semispace.
2471 Address bottom() { return to_space_.space_start(); }
2473 // Get the age mark of the inactive semispace.
2474 Address age_mark() { return from_space_.age_mark(); }
2475 // Set the age mark in the active semispace.
2476 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2478 // The start address of the space and a bit mask. Anding an address in the
2479 // new space with the mask will result in the start address.
2480 Address start() { return start_; }
2481 uintptr_t mask() { return address_mask_; }
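// Worked example (purely illustrative; the base address and size are
// chosen at set-up time): with a 4 MB reservation based at 0x40000000,
// address_mask_ == ~(4 * MB - 1), so for an address a == 0x401234a8 in
// new space, (a & mask()) == 0x40000000 == start().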
2483 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2484 ASSERT(Contains(addr));
2485 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2486 IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2487 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2490 INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2491 return reinterpret_cast<Address>(index << kPointerSizeLog2);
2494 // The allocation top and limit address.
2495 Address* allocation_top_address() {
2496 return allocation_info_.top_address();
2499 // The allocation limit address.
2500 Address* allocation_limit_address() {
2501 return allocation_info_.limit_address();
2504 MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
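// Sketch of the fast path (the real definition lives in spaces-inl.h):
//
//   Address old_top = allocation_info_.top();
//   if (allocation_info_.limit() - old_top < size_in_bytes) {
//     return SlowAllocateRaw(size_in_bytes);  // May add a fresh page.
//   }
//   HeapObject* obj = HeapObject::FromAddress(old_top);
//   allocation_info_.set_top(old_top + size_in_bytes);
//   return obj;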
2506 // Reset the allocation pointer to the beginning of the active semispace.
2507 void ResetAllocationInfo();
2509 void UpdateInlineAllocationLimit(int size_in_bytes);
2510 void LowerInlineAllocationLimit(intptr_t step) {
2511 inline_allocation_limit_step_ = step;
2512 UpdateInlineAllocationLimit(0);
2513 top_on_previous_step_ = allocation_info_.top();
2516 // Get the extent of the inactive semispace (for use as a marking stack,
2517 // or to zap it). Notice: space-addresses are not necessarily on the
2518 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2519 Address FromSpacePageLow() { return from_space_.page_low(); }
2520 Address FromSpacePageHigh() { return from_space_.page_high(); }
2521 Address FromSpaceStart() { return from_space_.space_start(); }
2522 Address FromSpaceEnd() { return from_space_.space_end(); }
2524 // Get the extent of the active semispace's pages' memory.
2525 Address ToSpaceStart() { return to_space_.space_start(); }
2526 Address ToSpaceEnd() { return to_space_.space_end(); }
2528 inline bool ToSpaceContains(Address address) {
2529 return to_space_.Contains(address);
2531 inline bool FromSpaceContains(Address address) {
2532 return from_space_.Contains(address);
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of
// that semispace).
2538 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2539 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
// are no pages, or the current page is already empty), or true if it
// was successful.
2545 bool AddFreshPage();
2548 // Verify the active semispace.
2549 virtual void Verify();
2553 // Print the active semispace.
2554 virtual void Print() { to_space_.Print(); }
2557 // Iterates the active semispace to collect statistics.
2558 void CollectStatistics();
2559 // Reports previously collected statistics of the active semispace.
2560 void ReportStatistics();
2561 // Clears previously collected statistics.
2562 void ClearHistograms();
2564 // Record the allocation or promotion of a heap object. Note that we don't
2565 // record every single allocation, but only those that happen in the
2566 // to space during a scavenge GC.
2567 void RecordAllocation(HeapObject* obj);
2568 void RecordPromotion(HeapObject* obj);
// Returns whether the operation succeeded.
2571 bool CommitFromSpaceIfNeeded() {
2572 if (from_space_.is_committed()) return true;
2573 return from_space_.Commit();
2576 bool UncommitFromSpace() {
2577 if (!from_space_.is_committed()) return true;
2578 return from_space_.Uncommit();
2581 inline intptr_t inline_allocation_limit_step() {
2582 return inline_allocation_limit_step_;
2585 SemiSpace* active_space() { return &to_space_; }
2588 // Update allocation info to match the current to-space page.
2589 void UpdateAllocationInfo();
2591 Address chunk_base_;
2592 uintptr_t chunk_size_;
2595 SemiSpace to_space_;
2596 SemiSpace from_space_;
2597 VirtualMemory reservation_;
2600 // Start address and bit mask for containment testing.
2602 uintptr_t address_mask_;
2603 uintptr_t object_mask_;
2604 uintptr_t object_expected_;
2606 // Allocation pointer and limit for normal allocation and allocation during
2607 // mark-compact collection.
2608 AllocationInfo allocation_info_;
// When incremental marking is active we will set allocation_info_.limit
// to be lower than the actual limit and then will gradually increase it
// in steps to guarantee that we do incremental marking steps even when
// all allocation is performed from inlined generated code.
2614 intptr_t inline_allocation_limit_step_;
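// For example (numbers purely illustrative): with a step of 1 KB the
// limit is first placed at most 1 KB above the top; when inlined
// allocation code hits this artificial limit it calls into the runtime,
// which performs an incremental marking step and then raises the limit
// by another step via UpdateInlineAllocationLimit.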
2616 Address top_on_previous_step_;
2618 HistogramInfo* allocated_histogram_;
2619 HistogramInfo* promoted_histogram_;
2621 MUST_USE_RESULT MaybeObject* SlowAllocateRaw(int size_in_bytes);
2623 friend class SemiSpaceIterator;
2626 TRACK_MEMORY("NewSpace")
2630 // -----------------------------------------------------------------------------
2631 // Old object space (excluding map objects)
2633 class OldSpace : public PagedSpace {
2635 // Creates an old space object with a given maximum capacity.
2636 // The constructor does not allocate pages from OS.
2637 OldSpace(Heap* heap,
2638 intptr_t max_capacity,
2640 Executability executable)
2641 : PagedSpace(heap, max_capacity, id, executable) {
2645 TRACK_MEMORY("OldSpace")
2649 // For contiguous spaces, top should be in the space (or at the end) and limit
2650 // should be the end of the space.
2651 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2652 SLOW_ASSERT((space).page_low() <= (info).top() \
2653 && (info).top() <= (space).page_high() \
2654 && (info).limit() <= (space).page_high())
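// Example use (illustrative), checking the new space allocation area
// against the active semispace:
//
//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);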
2657 // -----------------------------------------------------------------------------
2658 // Old space for all map objects
2660 class MapSpace : public PagedSpace {
2662 // Creates a map space object with a maximum capacity.
2663 MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2664 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2665 max_map_space_pages_(kMaxMapPageIndex - 1) {
2668 // Given an index, returns the page address.
// TODO(1600): this limit is artificial just to keep code compilable
2670 static const int kMaxMapPageIndex = 1 << 16;
2672 virtual int RoundSizeDownToObjectAlignment(int size) {
2673 if (IsPowerOf2(Map::kSize)) {
2674 return RoundDown(size, Map::kSize);
2676 return (size / Map::kSize) * Map::kSize;
2681 virtual void VerifyObject(HeapObject* obj);
2684 static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2686 // Do map space compaction if there is a page gap.
2687 int CompactionThreshold() {
2688 return kMapsPerPage * (max_map_space_pages_ - 1);
2691 const int max_map_space_pages_;
2694 TRACK_MEMORY("MapSpace")
2698 // -----------------------------------------------------------------------------
2699 // Old space for simple property cell objects
2701 class CellSpace : public PagedSpace {
2703 // Creates a property cell space object with a maximum capacity.
2704 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2705 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2708 virtual int RoundSizeDownToObjectAlignment(int size) {
2709 if (IsPowerOf2(Cell::kSize)) {
2710 return RoundDown(size, Cell::kSize);
2712 return (size / Cell::kSize) * Cell::kSize;
2717 virtual void VerifyObject(HeapObject* obj);
2720 TRACK_MEMORY("CellSpace")
2724 // -----------------------------------------------------------------------------
2725 // Old space for all global object property cell objects
2727 class PropertyCellSpace : public PagedSpace {
2729 // Creates a property cell space object with a maximum capacity.
2730 PropertyCellSpace(Heap* heap, intptr_t max_capacity,
2732 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2735 virtual int RoundSizeDownToObjectAlignment(int size) {
2736 if (IsPowerOf2(PropertyCell::kSize)) {
2737 return RoundDown(size, PropertyCell::kSize);
2739 return (size / PropertyCell::kSize) * PropertyCell::kSize;
2744 virtual void VerifyObject(HeapObject* obj);
2747 TRACK_MEMORY("PropertyCellSpace")
2751 // -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed
// by the large object space. A large object is allocated from the OS heap
// with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset into a
// page. Large objects do not move during garbage collections.
2758 class LargeObjectSpace : public Space {
2760 LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
2761 virtual ~LargeObjectSpace() {}
2763 // Initializes internal data structures.
2766 // Releases internal resources, frees objects in this space.
2769 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2770 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2771 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
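// Worked example (assuming a 1 MB Page::kPageSize; the exact constants
// are configuration-dependent): ObjectSizeFor(2 * MB) ==
//     2 * MB - Page::kPageSize - Page::kObjectStartOffset,
// i.e. slightly less than 1 MB of usable object area.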
// Allocates a large object of object_size bytes. The executability flag
// determines the memory protection of the allocated chunk.
2776 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size,
2777 Executability executable);
2779 // Available bytes for objects in this space.
2780 inline intptr_t Available();
2782 virtual intptr_t Size() {
2786 virtual intptr_t SizeOfObjects() {
2787 return objects_size_;
2790 intptr_t MaximumCommittedMemory() {
2791 return maximum_committed_;
2794 intptr_t CommittedMemory() {
2798 // Approximate amount of physical memory committed for this space.
2799 size_t CommittedPhysicalMemory();
// Finds an object for a given address, returns Failure::Exception() if it
// is not found. The function iterates through all objects in this space,
// so it may be slow.
2808 MaybeObject* FindObject(Address a);
2810 // Finds a large object page containing the given address, returns NULL
2811 // if such a page doesn't exist.
2812 LargePage* FindPage(Address a);
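// Example (illustrative; lo_space is a hypothetical LargeObjectSpace*):
// map an arbitrary address back to the large object covering it, if any.
//
//   LargePage* page = lo_space->FindPage(addr);
//   if (page != NULL) {
//     HeapObject* object = page->GetObject();
//     // ... addr points into object ...
//   }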
2814 // Frees unmarked objects.
2815 void FreeUnmarkedObjects();
2817 // Checks whether a heap object is in this space; O(1).
2818 bool Contains(HeapObject* obj);
2820 // Checks whether the space is empty.
2821 bool IsEmpty() { return first_page_ == NULL; }
2823 LargePage* first_page() { return first_page_; }
2826 virtual void Verify();
2830 virtual void Print();
2831 void ReportStatistics();
2832 void CollectCodeStatistics();
// Checks whether an address is in the object area of this space. It
// iterates over all objects in the space, so it may be slow.
2836 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2839 intptr_t max_capacity_;
2840 intptr_t maximum_committed_;
2841 // The head of the linked list of large object chunks.
2842 LargePage* first_page_;
2843 intptr_t size_; // allocated bytes
2844 int page_count_; // number of chunks
2845 intptr_t objects_size_; // size of objects
2846 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2849 friend class LargeObjectIterator;
2852 TRACK_MEMORY("LargeObjectSpace")
2856 class LargeObjectIterator: public ObjectIterator {
2858 explicit LargeObjectIterator(LargeObjectSpace* space);
2859 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
// Implementation of ObjectIterator.
2864 virtual HeapObject* next_object() { return Next(); }
2867 LargePage* current_;
2868 HeapObjectCallback size_func_;
2872 // Iterates over the chunks (pages and large object pages) that can contain
2873 // pointers to new space.
2874 class PointerChunkIterator BASE_EMBEDDED {
2876 inline explicit PointerChunkIterator(Heap* heap);
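// Typical use (illustrative): visit every chunk that may hold pointers
// to new space.
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... process chunk ...
//   }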
2878 // Return NULL when the iterator is done.
2879 MemoryChunk* next() {
2881 case kOldPointerState: {
2882 if (old_pointer_iterator_.has_next()) {
2883 return old_pointer_iterator_.next();
2889 if (map_iterator_.has_next()) {
2890 return map_iterator_.next();
2892 state_ = kLargeObjectState;
2895 case kLargeObjectState: {
2896 HeapObject* heap_object;
2898 heap_object = lo_iterator_.Next();
2899 if (heap_object == NULL) {
2900 state_ = kFinishedState;
// Fixed arrays are the only pointer-containing objects in large
// object space.
2905 } while (!heap_object->IsFixedArray());
2906 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2909 case kFinishedState:
2927 PageIterator old_pointer_iterator_;
2928 PageIterator map_iterator_;
2929 LargeObjectIterator lo_iterator_;
2934 struct CommentStatistic {
2935 const char* comment;
2943 // Must be small, since an iteration is used for lookup.
2944 static const int kMaxComments = 64;
2949 } } // namespace v8::internal
2951 #endif // V8_SPACES_H_