// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/hashmap.h"
#include "src/platform/mutex.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class Isolate;
// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.
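//
// Illustrative sketch (not part of the original header): the bump-pointer
// allocation this design enables. The wrapper function below is hypothetical;
// top(), limit() and SetTopAndLimit() are the PagedSpace accessors declared
// further down in this file.
//
//   HeapObject* TryAllocateLinearly(PagedSpace* space, int size_in_bytes) {
//     Address top = space->top();
//     if (space->limit() - top < size_in_bytes) return NULL;  // slow path
//     space->SetTopAndLimit(top + size_in_bytes, space->limit());
//     return HeapObject::FromAddress(top);
//   }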
75 // Some assertion macros used in the debugging mode.
77 #define ASSERT_PAGE_ALIGNED(address) \
78 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
80 #define ASSERT_OBJECT_ALIGNED(address) \
81 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
83 #define ASSERT_OBJECT_SIZE(size) \
84 ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
86 #define ASSERT_PAGE_OFFSET(offset) \
87 ASSERT((Page::kObjectStartOffset <= offset) \
88 && (offset <= Page::kPageSize))
90 #define ASSERT_MAP_PAGE_INDEX(index) \
91 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
class MemoryAllocator;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;

  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
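// A minimal usage sketch (illustrative, not part of the original header):
// setting two consecutive mark bits in one cell via Next(), which advances
// the mask within the cell and steps to the next cell once the mask wraps.
//
//   MarkBit::CellType cell = 0;
//   MarkBit bit(&cell, 1, false);
//   bit.Set();               // cell == 0x1
//   bit.Next().Set();        // cell == 0x3
//   ASSERT(bit.Get() && bit.Next().Get());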
// Bitmap is a sequence of cells each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
      (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);

  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  static int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }
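  // Worked example: with kBitsPerCell == 32, mark-bit index 37 maps to cell
  // IndexToCell(37) == 1, bit (37 & kBitIndexMask) == 5 within that cell, and
  // CellAlignIndex(37) rounds up to 64, the first index of the following cell.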
  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }
      Flush();
      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }
      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};
// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
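  // For example, with kAlignment == 1MB a chunk header at 0x40300000 owns
  // addresses up to 0x403FFFFF, and FromAddress(0x403ABCDE) masks off the
  // low 20 bits to recover 0x40300000 (illustrative addresses).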
  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
  }

  MemoryChunk* prev_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
  }

  void set_next_chunk(MemoryChunk* next) {
    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
  }

  void set_prev_chunk(MemoryChunk* prev) {
    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
  }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
        kFailureTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kFailureTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
           kFailureTag);
  }
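  // Illustrative example of the tagging above, assuming kFailureTag == 3:
  // a Space* at 0x5000 is stored in owner_ as 0x5003; owner() checks the low
  // tag bits and subtracts the tag to recover 0x5000, so an untagged value in
  // this slot never looks like a valid owner.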
  VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(VirtualMemory* reservation) {
    ASSERT_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated over, hitting only the live
    // objects, whereas those swept conservatively cannot be iterated over.
    // Both flags indicate that marking bits have been cleared by the sweeper;
    // otherwise marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black
    // and a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };

  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);

  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }
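  // For example, copying just the two new-space flags from another chunk
  // (sketch):
  //   chunk->SetFlags(other->GetFlags(),
  //                   (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE));
  // All bits outside the mask keep their current value in flags_.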
  // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
  // sweeping must not be performed on that page.
  // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
  // page and will not touch the page memory anymore.
  // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
  // sweeper thread.
  // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    PARALLEL_SWEEPING_DONE,
    PARALLEL_SWEEPING_FINALIZE,
    PARALLEL_SWEEPING_IN_PROGRESS,
    PARALLEL_SWEEPING_PENDING
  };

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

  bool TryParallelSweeping() {
    return base::Acquire_CompareAndSwap(
        &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
        PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
  }
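  // Typical use (sketch): each sweeper thread walks the page list and claims
  // pages with the compare-and-swap above, so a PENDING page is swept by
  // exactly one thread. SweepPage() is a hypothetical helper.
  //
  //   for (Page* p = space->FirstPage(); p != space->anchor();
  //        p = p->next_page()) {
  //     if (p->TryParallelSweeping()) SweepPage(space, p);
  //   }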
  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    ASSERT(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
                                    kIntSize + kIntSize + kPointerSize +
                                    5 * kPointerSize +
                                    kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
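  // Worked example with illustrative numbers: on a 32-bit build
  // kObjectStartAlignment is 32 * 4 == 128 bytes. If kBodyOffset were 592,
  // then (592 - 1) % 128 == 79 and kObjectStartOffset == 591 + (128 - 79)
  // == 640, i.e. the smallest multiple of 128 that is >= kBodyOffset.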
  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }
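  // The two mappings invert each other at word granularity. For example, on
  // a 32-bit build (kPointerSizeLog2 == 2) an object 0x40 bytes into the
  // chunk has AddressToMarkbitIndex(addr) == 0x10, and
  // MarkbitIndexToAddress(0x10) returns the chunk address plus 0x40.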
  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() { return heap_; }

  static const int kFlagsOffset = kPointerSize;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    ASSERT(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() {
    return high_water_mark_;
  }

  static inline void UpdateHighWaterMark(Address mark);

 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer, but
  // no failure can be in an object, so this can be distinguished from any entry
  // in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential,
  // count highest number of bytes ever allocated on the page.
  int high_water_mark_;

  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

 private:
  // next_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer of type MemoryChunk
  base::AtomicWord prev_chunk_;

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  // This only works if the object is in fact in a page. See also
  // MemoryChunk::FromAddress() and FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }
  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; } \
  void set_##name(type name) { name##_ = name; } \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

  virtual void Print() = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};
912 // All heap objects containing executable code (code objects) must be allocated
913 // from a 2 GB range of memory, so that they can call each other using 32-bit
914 // displacements. This happens automatically on 32-bit platforms, where 32-bit
915 // displacements cover the entire 4GB virtual address space. On 64-bit
916 // platforms, we support this using the CodeRange object, which reserves and
917 // manages a range of virtual memory.
920 explicit CodeRange(Isolate* isolate);
921 ~CodeRange() { TearDown(); }
923 // Reserves a range of virtual memory, but does not commit any of it.
924 // Can only be called once, at heap initialization time.
925 // Returns false on failure.
926 bool SetUp(size_t requested_size);
928 // Frees the range of virtual memory, and frees the data structures used to
932 bool valid() { return code_range_ != NULL; }
935 return static_cast<Address>(code_range_->address());
937 bool contains(Address address) {
938 if (!valid()) return false;
939 Address start = static_cast<Address>(code_range_->address());
940 return start <= address && address < start + code_range_->size();
943 // Allocates a chunk of memory from the large-object portion of
944 // the code range. On platforms with no separate code range, should
946 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
947 const size_t commit_size,
949 bool CommitRawMemory(Address start, size_t length);
950 bool UncommitRawMemory(Address start, size_t length);
951 void FreeRawMemory(Address buf, size_t length);
956 // The reserved range of virtual memory that all code objects are put in.
957 VirtualMemory* code_range_;
958 // Plain old data class, just a struct plus a constructor.
961 FreeBlock(Address start_arg, size_t size_arg)
962 : start(start_arg), size(size_arg) {
963 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
964 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
966 FreeBlock(void* start_arg, size_t size_arg)
967 : start(static_cast<Address>(start_arg)), size(size_arg) {
968 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
969 ASSERT(size >= static_cast<size_t>(Page::kPageSize));
976 // Freed blocks of memory are added to the free list. When the allocation
977 // list is exhausted, the free list is sorted and merged to make the new
979 List<FreeBlock> free_list_;
980 // Memory is allocated from the free blocks on the allocation list.
981 // The block at current_allocation_block_index_ is the current block.
982 List<FreeBlock> allocation_list_;
983 int current_allocation_block_index_;
985 // Finds a block on the allocation list that contains at least the
986 // requested amount of memory. If none is found, sorts and merges
987 // the existing free memory blocks, and searches again.
988 // If none can be found, returns false.
989 bool GetNextAllocationBlock(size_t requested);
990 // Compares the start addresses of two free blocks.
991 static int CompareFreeBlockAddress(const FreeBlock* left,
992 const FreeBlock* right);
994 DISALLOW_COPY_AND_ASSIGN(CodeRange);
class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }
    list->AddObject(addr, size);
  }

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};
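// Illustrative example: with kRegionSizeLog2 == 13 each region covers 8KB, so
// a 1MB page has kSize == 128 entries. AddObject(addr, size) lowers starts_[]
// for every region the object overlaps; StartFor(addr) then yields an object
// start at or before addr's region, letting heap iterators begin scanning
// mid-page without decoding every preceding object.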
// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(
      intptr_t size, PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(
      intptr_t object_size, Space* owner, Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

  // Reports statistic info of the space.
  void ReportStatistics();

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it
  // can be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size,
                                size_t commit_size,
                                size_t alignment,
                                Executability executable,
                                VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own Next()
// method, which is used to avoid the overhead of virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};
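// Typical usage (illustrative sketch): walking every live object in a paged
// space; free spaces, fillers and the special garbage section are skipped by
// Next().
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }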
// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {
  }

  INLINE(void set_top(Address top)) {
    SLOW_ASSERT(top == NULL ||
        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_ASSERT(top_ == NULL ||
        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  Address* top_address() {
    return &top_;
  }

  INLINE(void set_limit(Address limit)) {
    SLOW_ASSERT(limit == NULL ||
        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_ASSERT(limit_ == NULL ||
        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
    return limit_;
  }

  Address* limit_address() {
    return &limit_;
  }

  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
        && (top_ <= limit_);
  }

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
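//
// Worked example of the balancing: starting from cleared stats,
// ExpandSpace(100) gives capacity == size == 100; DeallocateBytes(40) moves
// 40 bytes to available (size == 60); WasteBytes(10) leaves size == 50,
// waste == 10, and available == capacity - size - waste == 40.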
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the size)
  // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};
// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // code.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  static inline FreeListNode* cast(Object* object) {
    return reinterpret_cast<FreeListNode*>(object);
  }

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() :
      top_(0),
      end_(NULL),
      available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeListNode* node, int size_in_bytes);

  FreeListNode* PickNodeFromList(int *node_size);
  FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeListNode* top() const {
    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
  }

  void set_top(FreeListNode* top) {
    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
  }

  FreeListNode** GetEndAddress() { return &end_; }
  FreeListNode* end() const { return end_; }
  void set_end(FreeListNode* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  Mutex* mutex() { return &mutex_; }

  intptr_t SumFreeList();
  int FreeListLength();

 private:
  // top_ points to the top FreeListNode* in the free list category.
  base::AtomicWord top_;
  FreeListNode* end_;
  Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};
// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.
//
// The old space free list is organized in categories.
// 1-31 words: Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor. However the distance between top
//     and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 1-31 words in size. These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 32-255 words in size. These
//     spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 256-2047 words in size.
//     These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
//     Empty pages are added to this list. These spaces are called huge.
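//
// For example, on a 32-bit build (kPointerSize == 4) a freed 300-word block
// (1200 bytes) is stored on the medium list (256-2047 words), and a request
// for a 100-word object (32-255 words) is served from that same medium list,
// matching kMediumAllocationMax == kSmallListMax below.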
class FreeList {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }

  intptr_t SumFreeLists();

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};
class AllocationResult {
 public:
  // Implicit constructor from Object*.
  AllocationResult(Object* object) : object_(object),  // NOLINT
                                     retry_space_(INVALID_SPACE) { }

  AllocationResult() : object_(NULL),
                       retry_space_(INVALID_SPACE) { }

  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

  Object* ToObjectChecked() {
    CHECK(!IsRetry());
    return object_;
  }

  AllocationSpace RetrySpace() {
    ASSERT(IsRetry());
    return retry_space_;
  }

 private:
  explicit AllocationResult(AllocationSpace space) : object_(NULL),
                                                     retry_space_(space) { }

  Object* object_;
  AllocationSpace retry_space_;
};
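// Typical usage (sketch), with AllocateRaw as declared on PagedSpace below:
//
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!allocation.To(&object)) {
//     // failed; collect garbage in allocation.RetrySpace() and retry
//   }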
class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or a Smi if it is not. The implementation iterates over
  // objects in the page containing the address, so the cost is linear in the
  // number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterBoot();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // The maximum amount of memory ever committed for this space.
  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }

    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
    ResetFreeListStatistics();
  }

  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() { return free_list_.available(); }

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated! The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // As size, but the bytes in lazily swept pages are estimated and the bytes
  // in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects();

  // Wasted bytes in this space. These are just the bytes that were thrown away
  // due to being too small to use for allocation. They do not include the
  // free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  // If add_to_freelist is false then just accounting stats are updated and
  // no attempt to add area to free list is made.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
    return size_in_bytes - wasted;
  }
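  // For example (sketch, following the free list category comment above): a
  // freed block of at least kSmallListMin bytes lands on a free list and
  // Free() returns its full size, while a block of 1-31 words is discarded
  // as waste, so Free() returns 0 for it.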
1845 void ResetFreeList() {
1849 // Set space allocation info.
1850 void SetTopAndLimit(Address top, Address limit) {
1851 ASSERT(top == limit ||
1852 Page::FromAddress(top) == Page::FromAddress(limit - 1));
1853 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1854 allocation_info_.set_top(top);
1855 allocation_info_.set_limit(limit);
1858 // Empty space allocation info, returning unused area to free list.
1859 void EmptyAllocationInfo() {
1860 // Mark the old linear allocation area with a free space map so it can be
1861 // skipped when scanning the heap.
1862 int old_linear_size = static_cast<int>(limit() - top());
1863 Free(top(), old_linear_size);
1864 SetTopAndLimit(NULL, NULL);
1867 void Allocate(int bytes) {
1868 accounting_stats_.AllocateBytes(bytes);
1871 void IncreaseCapacity(int size);
1873 // Releases an unused page and shrinks the space.
1874 void ReleasePage(Page* page);
1876 // The dummy page that anchors the linked list of pages.
1877 Page* anchor() { return &anchor_; }
#ifdef VERIFY_HEAP
// Verify integrity of this space.
virtual void Verify(ObjectVisitor* visitor);

// Overridden by subclasses to verify space-specific object
// properties (e.g., only maps or free-list nodes are in map space).
virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
// Print meta info and objects in this space.
virtual void Print();

// Reports statistics for the space.
void ReportStatistics();

// Reports code object related statistics.
void CollectCodeStatistics();
static void ReportCodeStatistics(Isolate* isolate);
static void ResetCodeStatistics(Isolate* isolate);
#endif
1901 bool was_swept_conservatively() { return was_swept_conservatively_; }
1902 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
// Evacuation candidates are swept by the evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
  return !p->IsEvacuationCandidate() &&
         !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
         !p->WasSweptPrecisely();
}
void IncrementUnsweptFreeBytes(intptr_t by) {
  unswept_free_bytes_ += by;
}

void IncreaseUnsweptFreeBytes(Page* p) {
  ASSERT(ShouldBeSweptBySweeperThreads(p));
  unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}

void DecrementUnsweptFreeBytes(intptr_t by) {
  unswept_free_bytes_ -= by;
}

void DecreaseUnsweptFreeBytes(Page* p) {
  ASSERT(ShouldBeSweptBySweeperThreads(p));
  unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}

void ResetUnsweptFreeBytes() {
  unswept_free_bytes_ = 0;
}
// This function tries to steal size_in_bytes of memory from the sweeper
// threads' free lists. If it does not succeed in stealing enough memory, it
// will wait for the sweeper threads to finish sweeping.
// It returns true when sweeping is completed and false otherwise.
1938 bool EnsureSweeperProgress(intptr_t size_in_bytes);
void set_end_of_unswept_pages(Page* page) {
  end_of_unswept_pages_ = page;
}

Page* end_of_unswept_pages() {
  return end_of_unswept_pages_;
}
1948 Page* FirstPage() { return anchor_.next_page(); }
1949 Page* LastPage() { return anchor_.prev_page(); }
1951 void EvictEvacuationCandidatesFromFreeLists();
// Returns the total number of pages in this space.
1956 int CountTotalPages();
// Returns the size of the allocatable area on a page in this space.
inline int AreaSize() {
  return area_size_;
}
1964 FreeList* free_list() { return &free_list_; }
protected:
int area_size_;

// Maximum capacity of this space.
intptr_t max_capacity_;
1971 intptr_t SizeOfFirstPage();
1973 // Accounting information for this space.
1974 AllocationStats accounting_stats_;
// The dummy page that anchors the double linked list of pages.
Page anchor_;
1979 // The space's free list.
1980 FreeList free_list_;
1982 // Normal allocation information.
1983 AllocationInfo allocation_info_;
1985 bool was_swept_conservatively_;
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimate, because concurrent
// sweeping is done conservatively.
1990 intptr_t unswept_free_bytes_;
1992 // The sweeper threads iterate over the list of pointer and data space pages
1993 // and sweep these pages concurrently. They will stop sweeping after the
1994 // end_of_unswept_pages_ page.
1995 Page* end_of_unswept_pages_;
// Expands the space by allocating a fixed number of pages. Returns false if
// it cannot allocate the requested number of pages from the OS, or if the
// hard heap size limit has been hit.
bool Expand();
2002 // Generic fast case allocation function that tries linear allocation at the
2003 // address denoted by top in allocation_info_.
2004 inline HeapObject* AllocateLinearly(int size_in_bytes);
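// Conceptually, the linear (bump-pointer) fast path looks like the following
// illustrative sketch; the real fast path also lives in generated code:
//
//   Address top = allocation_info_.top();
//   if (top + size_in_bytes > allocation_info_.limit()) {
//     return NULL;  // Take the slow path (free list / sweeper / expansion).
//   }
//   allocation_info_.set_top(top + size_in_bytes);
//   return HeapObject::FromAddress(top);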
2006 MUST_USE_RESULT HeapObject*
2007 WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
2009 // Slow path of AllocateRaw. This function is space-dependent.
2010 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2012 friend class PageIterator;
friend class MarkCompactCollector;
};
class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }
  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

 private:
  int number_;
  int bytes_;
};
// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo : public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


enum SemiSpaceId {
  kFromSpace = 0,
  kToSpace = 1
};


class SemiSpace;
class NewSpacePage : public MemoryChunk {
 public:
  // GC related flags copied from from-space to to-space when
2064 // flipping semispaces.
2065 static const intptr_t kCopyOnFlipFlagsMask =
2066 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2067 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2068 (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2070 static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
inline NewSpacePage* next_page() const {
  return static_cast<NewSpacePage*>(next_chunk());
}

inline void set_next_page(NewSpacePage* page) {
  set_next_chunk(page);
}

inline NewSpacePage* prev_page() const {
  return static_cast<NewSpacePage*>(prev_chunk());
}

inline void set_prev_page(NewSpacePage* page) {
  set_prev_chunk(page);
}

SemiSpace* semi_space() {
  return reinterpret_cast<SemiSpace*>(owner());
}

bool is_anchor() { return !this->InNewSpace(); }

static bool IsAtStart(Address addr) {
  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
      == kObjectStartOffset;
}

static bool IsAtEnd(Address addr) {
  return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
}
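// Worked example (illustrative; assumes 1 MB pages): a page occupies
// [N << 20, (N + 1) << 20), so addr & Page::kPageAlignmentMask is the offset
// of addr within its page. The first object begins right after the page
// header at kObjectStartOffset, which is what IsAtStart() tests; an address
// one past the last byte of a page has offset 0 in the *next* page, which is
// what IsAtEnd() tests.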
Address address() {
  return reinterpret_cast<Address>(this);
}
// Finds the NewSpacePage containing the given address.
static inline NewSpacePage* FromAddress(Address address_in_page) {
  Address page_start =
      reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                ~Page::kPageAlignmentMask);
  NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
  return page;
}
2116 // Find the page for a limit address. A limit address is either an address
2117 // inside a page, or the address right after the last byte of a page.
static inline NewSpacePage* FromLimit(Address address_limit) {
  return NewSpacePage::FromAddress(address_limit - 1);
}

// Checks if address1 and address2 are on the same new space page.
static inline bool OnSamePage(Address address1, Address address2) {
  return NewSpacePage::FromAddress(address1) ==
         NewSpacePage::FromAddress(address2);
}

 private:
  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) {
    InitializeAsAnchor(owner);
  }
static NewSpacePage* Initialize(Heap* heap,
                                Address start,
                                SemiSpace* semi_space);

// Initializes a fake NewSpacePage used as sentinel at the ends
// of a doubly-linked list of real NewSpacePages.
// Only uses the prev/next links, and sets flags to not be in new-space.
void InitializeAsAnchor(SemiSpace* owner);
2144 friend class SemiSpace;
friend class SemiSpaceIterator;
};
2149 // -----------------------------------------------------------------------------
2150 // SemiSpace in young generation
2152 // A semispace is a contiguous chunk of memory holding page-like memory
2153 // chunks. The mark-compact collector uses the memory of the first page in
2154 // the from space as a marking stack when tracing live objects.
class SemiSpace : public Space {
 public:
  SemiSpace(Heap* heap, SemiSpaceId semispace)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        start_(NULL),
        age_mark_(NULL),
        id_(semispace),
        anchor_(this),
        current_page_(NULL) { }
2167 // Sets up the semispace using the given chunk.
2168 void SetUp(Address start, int initial_capacity, int maximum_capacity);
// Tear down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();

// True if the space has been set up but not torn down.
bool HasBeenSetUp() { return start_ != NULL; }
2177 // Grow the semispace to the new capacity. The new capacity
2178 // requested must be larger than the current capacity and less than
2179 // the maximum capacity.
2180 bool GrowTo(int new_capacity);
2182 // Shrinks the semispace to the new capacity. The new capacity
2183 // requested must be more than the amount of used memory in the
2184 // semispace and less than the current capacity.
2185 bool ShrinkTo(int new_capacity);
// Returns the start address of the first page of the space.
Address space_start() {
  ASSERT(anchor_.next_page() != &anchor_);
  return anchor_.next_page()->area_start();
}

// Returns the start address of the current page of the space.
Address page_low() {
  return current_page_->area_start();
}

// Returns one past the end address of the space.
Address space_end() {
  return anchor_.prev_page()->area_end();
}

// Returns one past the end address of the current page of the space.
Address page_high() {
  return current_page_->area_end();
}

bool AdvancePage() {
  NewSpacePage* next_page = current_page_->next_page();
  if (next_page == anchor()) return false;
  current_page_ = next_page;
  return true;
}
// Resets the space to using the first page.
void Reset();
2218 // Age mark accessors.
2219 Address age_mark() { return age_mark_; }
2220 void set_age_mark(Address mark);
// True if the address is in the address range of this semispace (not
// necessarily below the allocation pointer).
bool Contains(Address a) {
  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
      == reinterpret_cast<uintptr_t>(start_);
}

// True if the object is a heap object in the address range of this
// semispace (not necessarily below the allocation pointer).
bool Contains(Object* o) {
  return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
}
// If we don't have these here then SemiSpace will be abstract. However
// they should never be called.
virtual intptr_t Size() {
  UNREACHABLE();
  return 0;
}

bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
2246 NewSpacePage* first_page() { return anchor_.next_page(); }
2247 NewSpacePage* current_page() { return current_page_; }
#ifdef VERIFY_HEAP
virtual void Verify();
#endif

#ifdef DEBUG
virtual void Print();
// Validates a range of addresses in a SemiSpace.
// The "from" address must be on a page prior to the "to" address,
// in the linked page order, or it must be earlier on the same page.
static void AssertValidRange(Address from, Address to);
#else
// Do nothing.
inline static void AssertValidRange(Address from, Address to) {}
#endif
2264 // Returns the current capacity of the semi space.
2265 int Capacity() { return capacity_; }
2267 // Returns the maximum capacity of the semi space.
2268 int MaximumCapacity() { return maximum_capacity_; }
2270 // Returns the initial capacity of the semi space.
2271 int InitialCapacity() { return initial_capacity_; }
2273 SemiSpaceId id() { return id_; }
2275 static void Swap(SemiSpace* from, SemiSpace* to);
2277 // Returns the maximum amount of memory ever committed by the semi space.
2278 size_t MaximumCommittedMemory() { return maximum_committed_; }
2280 // Approximate amount of physical memory committed for this space.
2281 size_t CommittedPhysicalMemory();
private:
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
void FlipPages(intptr_t flags, intptr_t flag_mask);
2288 // Updates Capacity and MaximumCommitted based on new capacity.
2289 void SetCapacity(int new_capacity);
2291 NewSpacePage* anchor() { return &anchor_; }
// The current and maximum capacity of the space.
int capacity_;
int maximum_capacity_;
int initial_capacity_;

intptr_t maximum_committed_;

// The start address of the space.
Address start_;
// Used to govern object promotion during mark-compact collection.
Address age_mark_;

// Masks and comparison values to test for containment in this semispace.
uintptr_t address_mask_;
uintptr_t object_mask_;
uintptr_t object_expected_;

bool committed_;
SemiSpaceId id_;

NewSpacePage anchor_;
NewSpacePage* current_page_;
2316 friend class SemiSpaceIterator;
2317 friend class NewSpacePageIterator;
TRACK_MEMORY("SemiSpace")
};
2323 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2324 // semispace of the heap's new space. It iterates over the objects in the
2325 // semispace from a given start address (defaulting to the bottom of the
2326 // semispace) to the top of the semispace. New objects allocated after the
2327 // iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space. If no start
2331 // address is given, the iterator starts from the bottom of the space. If
2332 // no size function is given, the iterator calls Object::Size().
2334 // Iterate over all of allocated to-space.
2335 explicit SemiSpaceIterator(NewSpace* space);
// Iterate over all of allocated to-space, with a custom size function.
SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
// Iterate over part of allocated to-space, from start to the end
// of the space.
SemiSpaceIterator(NewSpace* space, Address start);
2341 // Iterate from one address to another in the same semi-space.
2342 SemiSpaceIterator(Address from, Address to);
HeapObject* Next() {
  if (current_ == limit_) return NULL;
  if (NewSpacePage::IsAtEnd(current_)) {
    NewSpacePage* page = NewSpacePage::FromLimit(current_);
    page = page->next_page();
    ASSERT(!page->is_anchor());
    current_ = page->area_start();
    if (current_ == limit_) return NULL;
  }

  HeapObject* object = HeapObject::FromAddress(current_);
  int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

  current_ += size;
  return object;
}
2361 // Implementation of the ObjectIterator functions.
2362 virtual HeapObject* next_object() { return Next(); }
 private:
  void Initialize(Address start,
                  Address end,
                  HeapObjectCallback size_func);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
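// Illustrative usage sketch (not part of this header):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... visit object ...
//   }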
2378 // -----------------------------------------------------------------------------
// A NewSpacePageIterator iterates over the pages in a semi-space.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
2383 explicit inline NewSpacePageIterator(NewSpace* space);
2385 // Make an iterator that runs over all pages in the given semispace,
2386 // even those not used in allocation.
2387 explicit inline NewSpacePageIterator(SemiSpace* space);
2389 // Make iterator that iterates from the page containing start
2390 // to the page that contains limit in the same semispace.
2391 inline NewSpacePageIterator(Address start, Address limit);
inline bool has_next();
inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};
2406 // -----------------------------------------------------------------------------
2407 // The young generation space.
2409 // The new space consists of a contiguous pair of semispaces. It simply
2410 // forwards most functions to the appropriate semispace.
class NewSpace : public Space {
 public:
  explicit NewSpace(Heap* heap)
      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
        to_space_(heap, kToSpace),
        from_space_(heap, kFromSpace),
        reservation_(),
        inline_allocation_limit_step_(0) {}
2422 // Sets up the new space using the given chunk.
bool SetUp(int reserved_semispace_size, int max_semi_space_size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
void TearDown();

// True if the space has been set up but not torn down.
bool HasBeenSetUp() {
  return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
}
// Flip the pair of spaces.
void Flip();

// Grow the capacity of the semispaces. Assumes that they are not at
// their maximum capacity.
void Grow();

// Shrink the capacity of the semispaces.
void Shrink();
2444 // True if the address or object lies in the address range of either
2445 // semispace (not necessarily below the allocation pointer).
bool Contains(Address a) {
  return (reinterpret_cast<uintptr_t>(a) & address_mask_)
      == reinterpret_cast<uintptr_t>(start_);
}

bool Contains(Object* o) {
  Address a = reinterpret_cast<Address>(o);
  return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
}
2456 // Return the allocated bytes in the active semispace.
virtual intptr_t Size() {
  return pages_used_ * NewSpacePage::kAreaSize +
         static_cast<int>(top() - to_space_.page_low());
}

// The same, but returning an int. We have to have the one that returns
// intptr_t because it is inherited, but since we know the new space cannot
// grow as large as the other spaces, the int version is useful:
int SizeAsInt() { return static_cast<int>(Size()); }
// Return the allocatable capacity of a semispace (page areas only, i.e.,
// excluding page headers).
intptr_t EffectiveCapacity() {
  SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
  return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
}

// Return the current capacity of a semispace.
intptr_t Capacity() {
  ASSERT(to_space_.Capacity() == from_space_.Capacity());
  return to_space_.Capacity();
}
2479 // Return the total amount of memory committed for new space.
intptr_t CommittedMemory() {
  if (from_space_.is_committed()) return 2 * Capacity();
  return Capacity();
}

// Return the maximum amount of memory ever committed for new space.
intptr_t MaximumCommittedMemory() {
  return to_space_.MaximumCommittedMemory() +
         from_space_.MaximumCommittedMemory();
}
2491 // Approximate amount of physical memory committed for this space.
2492 size_t CommittedPhysicalMemory();
2494 // Return the available bytes without growing.
intptr_t Available() {
  return Capacity() - Size();
}
2499 // Return the maximum capacity of a semispace.
int MaximumCapacity() {
  ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
  return to_space_.MaximumCapacity();
}

bool IsAtMaximumCapacity() {
  return Capacity() == MaximumCapacity();
}

// Returns the initial capacity of a semispace.
int InitialCapacity() {
  ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
  return to_space_.InitialCapacity();
}
// Return the address of the allocation pointer in the active semispace.
Address top() {
  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
  return allocation_info_.top();
}

void set_top(Address top) {
  ASSERT(to_space_.current_page()->ContainsLimit(top));
  allocation_info_.set_top(top);
}

// Return the address of the allocation pointer limit in the active
// semispace.
Address limit() {
  ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
  return allocation_info_.limit();
}
2532 // Return the address of the first object in the active semispace.
2533 Address bottom() { return to_space_.space_start(); }
2535 // Get the age mark of the inactive semispace.
2536 Address age_mark() { return from_space_.age_mark(); }
2537 // Set the age mark in the active semispace.
2538 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2540 // The start address of the space and a bit mask. Anding an address in the
2541 // new space with the mask will result in the start address.
2542 Address start() { return start_; }
2543 uintptr_t mask() { return address_mask_; }
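// Illustrative sketch: if the reservation is 2^k bytes and suitably aligned,
// then mask() == ~(2^k - 1), and the containment test used throughout is a
// single and-compare (this hypothetical helper mirrors Contains() above):
//
//   bool InNewSpaceRange(Address a) {
//     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
//         == reinterpret_cast<uintptr_t>(start_);
//   }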
INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
  ASSERT(Contains(addr));
  ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
         IsAligned(OffsetFrom(addr) - 1, kPointerSize));
  return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
}

INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
  return reinterpret_cast<Address>(index << kPointerSizeLog2);
}
// The allocation top address.
Address* allocation_top_address() {
  return allocation_info_.top_address();
}

// The allocation limit address.
Address* allocation_limit_address() {
  return allocation_info_.limit_address();
}
2566 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
2568 // Reset the allocation pointer to the beginning of the active semispace.
2569 void ResetAllocationInfo();
2571 void UpdateInlineAllocationLimit(int size_in_bytes);
void LowerInlineAllocationLimit(intptr_t step) {
  inline_allocation_limit_step_ = step;
  UpdateInlineAllocationLimit(0);
  top_on_previous_step_ = allocation_info_.top();
}
2578 // Get the extent of the inactive semispace (for use as a marking stack,
2579 // or to zap it). Notice: space-addresses are not necessarily on the
2580 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2581 Address FromSpacePageLow() { return from_space_.page_low(); }
2582 Address FromSpacePageHigh() { return from_space_.page_high(); }
2583 Address FromSpaceStart() { return from_space_.space_start(); }
2584 Address FromSpaceEnd() { return from_space_.space_end(); }
2586 // Get the extent of the active semispace's pages' memory.
2587 Address ToSpaceStart() { return to_space_.space_start(); }
2588 Address ToSpaceEnd() { return to_space_.space_end(); }
inline bool ToSpaceContains(Address address) {
  return to_space_.Contains(address);
}
inline bool FromSpaceContains(Address address) {
  return from_space_.Contains(address);
}
// True if the object is a heap object in the address range of the
// respective semispace (not necessarily below the allocation pointer of
// the semispace).
2600 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
2601 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
// Try to switch the active semispace to a new, empty, page.
// Returns false if this isn't possible or reasonable (i.e., there
// are no pages, or the current page is already empty), or true
// if it succeeded.
bool AddFreshPage();
#ifdef VERIFY_HEAP
// Verify the active semispace.
virtual void Verify();
#endif

#ifdef DEBUG
// Print the active semispace.
virtual void Print() { to_space_.Print(); }
#endif
2619 // Iterates the active semispace to collect statistics.
2620 void CollectStatistics();
2621 // Reports previously collected statistics of the active semispace.
2622 void ReportStatistics();
2623 // Clears previously collected statistics.
2624 void ClearHistograms();
// Record the allocation or promotion of a heap object. Note that we don't
// record every single allocation, but only those that happen in
// to-space during a scavenge GC.
2629 void RecordAllocation(HeapObject* obj);
2630 void RecordPromotion(HeapObject* obj);
// Return whether the operation succeeded.
bool CommitFromSpaceIfNeeded() {
  if (from_space_.is_committed()) return true;
  return from_space_.Commit();
}

bool UncommitFromSpace() {
  if (!from_space_.is_committed()) return true;
  return from_space_.Uncommit();
}
inline intptr_t inline_allocation_limit_step() {
  return inline_allocation_limit_step_;
}
2647 SemiSpace* active_space() { return &to_space_; }
private:
// Update allocation info to match the current to-space page.
2651 void UpdateAllocationInfo();
2653 Address chunk_base_;
2654 uintptr_t chunk_size_;
2657 SemiSpace to_space_;
2658 SemiSpace from_space_;
VirtualMemory reservation_;
int pages_used_;
// Start address and bit mask for containment testing.
Address start_;
uintptr_t address_mask_;
2665 uintptr_t object_mask_;
2666 uintptr_t object_expected_;
2668 // Allocation pointer and limit for normal allocation and allocation during
2669 // mark-compact collection.
2670 AllocationInfo allocation_info_;
// When incremental marking is active we will set allocation_info_.limit
// to be lower than the actual limit and then will gradually increase it
// in steps to guarantee that we do incremental marking steps even
2675 // when all allocation is performed from inlined generated code.
2676 intptr_t inline_allocation_limit_step_;
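// Worked example (illustrative numbers): with a step of 1 KB, the published
// limit is kept near top + 1 KB rather than at the real end of the page, so
// inlined allocation code hits the slow path roughly once per 1 KB of
// allocation, giving the collector a chance to do a marking step:
//
//   new_space->LowerInlineAllocationLimit(1 * KB);
//   // ... generated code now bumps top only up to the lowered limit ...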
2678 Address top_on_previous_step_;
2680 HistogramInfo* allocated_histogram_;
2681 HistogramInfo* promoted_histogram_;
2683 MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
2692 // -----------------------------------------------------------------------------
2693 // Old object space (excluding map objects)
class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable) {
  }

  TRACK_MEMORY("OldSpace")
};
2711 // For contiguous spaces, top should be in the space (or at the end) and limit
2712 // should be the end of the space.
2713 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2714 SLOW_ASSERT((space).page_low() <= (info).top() \
2715 && (info).top() <= (space).page_high() \
2716 && (info).limit() <= (space).page_high())
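// Illustrative use (e.g., from NewSpace code that manipulates the
// allocation info):
//
//   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);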
2719 // -----------------------------------------------------------------------------
2720 // Old space for all map objects
class MapSpace : public PagedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        max_map_space_pages_(kMaxMapPageIndex - 1) {
  }

  // Given an index, returns the page address.
  // TODO(1600): this limit is artificial just to keep code compilable
  static const int kMaxMapPageIndex = 1 << 16;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
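// Worked example (illustrative sizes): if Map::kSize were 88 (not a power
// of two), RoundSizeDownToObjectAlignment(200) == (200 / 88) * 88 == 176;
// if it were 64 (a power of two), RoundDown(200, 64) == 192 computes the
// same kind of result with a single mask operation instead of a division.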
 protected:
  virtual void VerifyObject(HeapObject* obj);

 private:
  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

 public:
  TRACK_MEMORY("MapSpace")
};
2760 // -----------------------------------------------------------------------------
2761 // Old space for simple property cell objects
class CellSpace : public PagedSpace {
 public:
  // Creates a cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
  }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Cell::kSize)) {
      return RoundDown(size, Cell::kSize);
    } else {
      return (size / Cell::kSize) * Cell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("CellSpace")
};
2786 // -----------------------------------------------------------------------------
2787 // Old space for all global object property cell objects
class PropertyCellSpace : public PagedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
                    AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
  }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(PropertyCell::kSize)) {
      return RoundDown(size, PropertyCell::kSize);
    } else {
      return (size / PropertyCell::kSize) * PropertyCell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("PropertyCellSpace")
};
2813 // -----------------------------------------------------------------------------
// Large objects (> Page::kMaxHeapObjectSize) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset into a page.
// Large objects do not move during garbage collections.
class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
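// Worked example (illustrative values): with Page::kPageSize == 4096 and
// Page::kObjectStartOffset == 32, a 1 MB chunk (1048576 bytes) can hold an
// object of at most 1048576 - 4096 - 32 == 1044448 bytes, while any chunk
// of 4128 bytes or less can hold no object at all (ObjectSizeFor returns 0).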
// Allocates a large object. Returns a retry/failure result if no memory is
// available; otherwise the object is allocated in place and never moves.
MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
                                             Executability executable);
2841 // Available bytes for objects in this space.
2842 inline intptr_t Available();
virtual intptr_t Size() { return size_; }

virtual intptr_t SizeOfObjects() { return objects_size_; }

intptr_t MaximumCommittedMemory() { return maximum_committed_; }

intptr_t CommittedMemory() { return Size(); }
2860 // Approximate amount of physical memory committed for this space.
2861 size_t CommittedPhysicalMemory();
// Finds an object for a given address, returns a Smi if it is not found.
// The function iterates through all objects in this space and may be slow.
Object* FindObject(Address a);
2871 // Finds a large object page containing the given address, returns NULL
2872 // if such a page doesn't exist.
2873 LargePage* FindPage(Address a);
2875 // Frees unmarked objects.
2876 void FreeUnmarkedObjects();
2878 // Checks whether a heap object is in this space; O(1).
2879 bool Contains(HeapObject* obj);
2881 // Checks whether the space is empty.
2882 bool IsEmpty() { return first_page_ == NULL; }
2884 LargePage* first_page() { return first_page_; }
#ifdef VERIFY_HEAP
virtual void Verify();
#endif

#ifdef DEBUG
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
#endif
// Checks whether an address is in the object area of this space. This
// iterates over all objects in the space and may be slow.
bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }

 private:
  intptr_t max_capacity_;
  intptr_t maximum_committed_;
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them.
  HashMap chunk_map_;
friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
  HeapObjectCallback size_func_;
};
2933 // Iterates over the chunks (pages and large object pages) that can contain
2934 // pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Return NULL when the iterator is done.
  MemoryChunk* next() {
    switch (state_) {
      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();
        }
        state_ = kMapState;
        // Fall through.
      }
      case kMapState: {
        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        }
        state_ = kLargeObjectState;
        // Fall through.
      }
      case kLargeObjectState: {
        HeapObject* heap_object;
        do {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
            return NULL;
          }
          // Fixed arrays are the only pointer-containing objects in large
          // object space.
        } while (!heap_object->IsFixedArray());
        return MemoryChunk::FromAddress(heap_object->address());
      }
      case kFinishedState:
        return NULL;
    }
    UNREACHABLE();
    return NULL;
  }

 private:
  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
  State state_;
  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
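// Illustrative usage sketch (not part of this header):
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... visit the chunk, e.g. scan it for pointers to new space ...
//   }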
struct CommentStatistic {
  const char* comment;
  int size;
  int count;

  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
3010 } } // namespace v8::internal
3012 #endif // V8_SPACES_H_