PageIterator it(space, PageIterator::PAGES_IN_USE);
while (it.has_next()) {
Page* p = it.next();
- // The offset of each live object in the page from the first live object
- // in the page.
- int offset = 0;
- EncodeForwardingAddressesInRange<Alloc,
- EncodeForwardingAddressInPagedSpace,
- ProcessNonLive>(
- p->ObjectAreaStart(),
- p->AllocationTop(),
- &offset);
+
+ if (p->WasInUseBeforeMC()) {
+ // The offset of each live object in the page from the first live object
+ // in the page.
+ int offset = 0;
+ EncodeForwardingAddressesInRange<Alloc,
+ EncodeForwardingAddressInPagedSpace,
+ ProcessNonLive>(
+ p->ObjectAreaStart(),
+ p->AllocationTop(),
+ &offset);
+ } else {
+ // Mark the whole unused page as a free region.
+ EncodeFreeRegion(p->ObjectAreaStart(),
+ p->AllocationTop() - p->ObjectAreaStart());
+ }
}
}
static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ // During sweeping of a paged space we try to find the longest sequences
+ // of pages without live objects and free them (instead of putting them
+ // on the free list).
+ Page* prev = NULL; // Page preceding current.
+ Page* first_empty_page = NULL; // First empty page in a sequence.
+ Page* prec_first_empty_page = NULL; // Page preceding first empty page.
+
+ // If the last used page of the space ends with a sequence of dead
+ // objects we can adjust the allocation top instead of putting this free
+ // area on the free list. Thus during sweeping we keep track of such
+ // areas and defer their deallocation until the sweeping of the next
+ // page is done: if one of the following pages contains live objects we
+ // have to put such an area on the free list.
+ Address last_free_start = NULL;
+ int last_free_size = 0;
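+
+ // For example, given pages P1 [live|dead], P2 [empty], P3 [empty] and
+ // P4 [live|dead] with the allocation top on P4: the dead tail of P1 is
+ // deferred and reaches the free list only when the live object on P4 is
+ // found, P2 and P3 are unlinked as one sequence via FreePages, and the
+ // dead tail of P4 is absorbed by moving the allocation top backwards.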
+
while (it.has_next()) {
Page* p = it.next();
if (object->IsMarked()) {
object->ClearMark();
MarkCompactCollector::tracer()->decrement_marked_count();
+
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, static_cast<int>(current - free_start));
+ dealloc(free_start, static_cast<int>(current - free_start), true);
is_previous_alive = true;
}
} else {
// loop.
}
- // If the last region was not live we need to deallocate from
- // free_start to the allocation top in the page.
- if (!is_previous_alive) {
- int free_size = static_cast<int>(p->AllocationTop() - free_start);
- if (free_size > 0) {
- dealloc(free_start, free_size);
+ bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
+ || (!is_previous_alive && free_start == p->ObjectAreaStart());
+
+ if (page_is_empty) {
+ // This page is empty. Check whether we are in the middle of a
+ // sequence of empty pages and start a new one if not.
+ if (first_empty_page == NULL) {
+ first_empty_page = p;
+ prec_first_empty_page = prev;
+ }
+
+ if (!is_previous_alive) {
+ // There are dead objects on this page. Update space accounting
+ // stats without putting anything on the free list.
+ int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
+ if (size_in_bytes > 0) {
+ dealloc(free_start, size_in_bytes, false);
+ }
+ }
+ } else {
+ // This page is not empty. A sequence of empty pages (if any) ended
+ // on the previous page.
+ if (first_empty_page != NULL) {
+ space->FreePages(prec_first_empty_page, prev);
+ prec_first_empty_page = first_empty_page = NULL;
+ }
+
+ // If there is a free ending area on one of the previous pages we
+ // have to deallocate it and put it on the free list.
+ if (last_free_size > 0) {
+ dealloc(last_free_start, last_free_size, true);
+ last_free_start = NULL;
+ last_free_size = 0;
+ }
+
+ // If the last region of this page was not live, remember it.
+ if (!is_previous_alive) {
+ ASSERT(last_free_size == 0);
+ last_free_size = static_cast<int>(p->AllocationTop() - free_start);
+ last_free_start = free_start;
}
}
+
+ prev = p;
+ }
+
+ // We reached the end of the space. See if we need to adjust the
+ // allocation top.
+ Address new_allocation_top = NULL;
+
+ if (first_empty_page != NULL) {
+ // The last used pages of the space are empty. We can move the
+ // allocation top backwards to the beginning of the first empty page.
+ ASSERT(prev == space->AllocationTopPage());
+
+ new_allocation_top = first_empty_page->ObjectAreaStart();
+ }
+
+ if (last_free_size > 0) {
+ // There was a free ending area on the previous page.
+ // Deallocate it without putting it on the free list and move the
+ // allocation top to the beginning of this free area.
+ dealloc(last_free_start, last_free_size, false);
+ new_allocation_top = last_free_start;
+ }
+
+ if (new_allocation_top != NULL) {
+ Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
+
+ ASSERT(((first_empty_page == NULL) &&
+ (new_allocation_top_page == space->AllocationTopPage())) ||
+ ((first_empty_page != NULL) && (last_free_size > 0) &&
+ (new_allocation_top_page == prec_first_empty_page)) ||
+ ((first_empty_page != NULL) && (last_free_size == 0) &&
+ (new_allocation_top_page == first_empty_page)));
+
+ space->SetTop(new_allocation_top,
+ new_allocation_top_page->ObjectAreaEnd());
}
}
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
- int size_in_bytes) {
+ int size_in_bytes,
+ bool add_to_freelist) {
Heap::ClearRSetRange(start, size_in_bytes);
- Heap::old_pointer_space()->Free(start, size_in_bytes);
+ Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
- int size_in_bytes) {
- Heap::old_data_space()->Free(start, size_in_bytes);
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
- int size_in_bytes) {
- Heap::code_space()->Free(start, size_in_bytes);
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
- int size_in_bytes) {
+ int size_in_bytes,
+ bool add_to_freelist) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
- Heap::map_space()->Free(a);
+ Heap::map_space()->Free(a, add_to_freelist);
}
}
void MarkCompactCollector::DeallocateCellBlock(Address start,
- int size_in_bytes) {
+ int size_in_bytes,
+ bool add_to_freelist) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
- Heap::cell_space()->Free(a);
+ Heap::cell_space()->Free(a, add_to_freelist);
}
}
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// Callback function for non-live blocks in the old generation.
-typedef void (*DeallocateFunction)(Address start, int size_in_bytes);
+// If add_to_freelist is false, only the accounting stats are updated and
+// no attempt is made to add the area to the free list.
+typedef void (*DeallocateFunction)(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
// Forward declarations.
// Callback functions for deallocating non-live blocks in the old
// generation.
- static void DeallocateOldPointerBlock(Address start, int size_in_bytes);
- static void DeallocateOldDataBlock(Address start, int size_in_bytes);
- static void DeallocateCodeBlock(Address start, int size_in_bytes);
- static void DeallocateMapBlock(Address start, int size_in_bytes);
- static void DeallocateCellBlock(Address start, int size_in_bytes);
+ static void DeallocateOldPointerBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ static void DeallocateOldDataBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ static void DeallocateCodeBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ static void DeallocateMapBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
+ static void DeallocateCellBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
}
+bool Page::GetPageFlag(PageFlag flag) {
+ return (flags & flag) != 0;
+}
+
+
+void Page::SetPageFlag(PageFlag flag, bool value) {
+ if (value) {
+ flags |= flag;
+ } else {
+ flags &= ~flag;
+ }
+}
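+
+// Flags occupy independent bits of the page's 'flags' word; for example,
+//   p->SetPageFlag(Page::WAS_IN_USE_BEFORE_MC, true);
+//   ASSERT(p->GetPageFlag(Page::WAS_IN_USE_BEFORE_MC));
+// leaves the IS_NORMAL_PAGE bit untouched.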
+
+
+bool Page::WasInUseBeforeMC() {
+ return GetPageFlag(WAS_IN_USE_BEFORE_MC);
+}
+
+
+void Page::SetWasInUseBeforeMC(bool was_in_use) {
+ SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
+}
+
+
+bool Page::IsLargeObjectPage() {
+ return !GetPageFlag(IS_NORMAL_PAGE);
+}
+
+
+void Page::SetIsLargeObjectPage(bool is_large_object_page) {
+ SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
+}
+
+
// -----------------------------------------------------------------------------
// MemoryAllocator
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
- p->is_normal_page = 1;
+ p->SetIsLargeObjectPage(false);
page_addr += Page::kPageSize;
}
}
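+// Deletes every chunk owned by the given space. Used by
+// PagedSpace::TearDown to release all of a space's pages at once.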
+void MemoryAllocator::FreeAllPages(PagedSpace* space) {
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ if (chunks_[i].owner() == space) {
+ DeleteChunk(i);
+ }
+ }
+}
+
+
void MemoryAllocator::DeleteChunk(int chunk_id) {
ASSERT(IsValidChunk(chunk_id));
#endif
+void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use) {
+ Page* first = NULL;
+ Page* last = NULL;
+
+ for (int i = 0, length = chunks_.length(); i < length; i++) {
+ ChunkInfo& chunk = chunks_[i];
+
+ if (chunk.owner() == space) {
+ if (first == NULL) {
+ Address low = RoundUp(chunk.address(), Page::kPageSize);
+ first = Page::FromAddress(low);
+ }
+ last = RelinkPagesInChunk(i,
+ chunk.address(),
+ chunk.size(),
+ last,
+ last_page_in_use);
+ }
+ }
+
+ if (first_page != NULL) {
+ *first_page = first;
+ }
+
+ if (last_page != NULL) {
+ *last_page = last;
+ }
+}
+
+
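+// Relinks the pages of a single chunk: rewires the intra-chunk next-page
+// links and records the last page that was in use before the mark-compact
+// collection. Linking between consecutive chunks is done by the caller
+// through the 'prev' argument.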
+Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ int chunk_size,
+ Page* prev,
+ Page** last_page_in_use) {
+ Address page_addr = RoundUp(chunk_start, Page::kPageSize);
+ int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
+
+ if (prev->is_valid()) {
+ SetNextPage(prev, Page::FromAddress(page_addr));
+ }
+
+ for (int i = 0; i < pages_in_chunk; i++) {
+ Page* p = Page::FromAddress(page_addr);
+ p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ page_addr += Page::kPageSize;
+
+ if (p->WasInUseBeforeMC()) {
+ *last_page_in_use = p;
+ }
+ }
+
+ // Set the next page of the last page to 0.
+ Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
+ last_page->opaque_header = OffsetFrom(0) | chunk_id;
+
+ if (last_page->WasInUseBeforeMC()) {
+ *last_page_in_use = last_page;
+ }
+
+ return last_page;
+}
+
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
// Use first_page_ for allocation.
SetAllocationInfo(&allocation_info_, first_page_);
+ page_list_is_chunk_ordered_ = true;
+
return true;
}
void PagedSpace::TearDown() {
- first_page_ = MemoryAllocator::FreePages(first_page_);
- ASSERT(!first_page_->is_valid());
-
+ MemoryAllocator::FreeAllPages(this);
+ first_page_ = NULL;
accounting_stats_.Clear();
}
void PagedSpace::Shrink() {
+ if (!page_list_is_chunk_ordered_) {
+ // We cannot shrink the space if the page list is not chunk-ordered
+ // (see the comment for class MemoryAllocator for the definition).
+ return;
+ }
+
// Release half of free pages.
Page* top_page = AllocationTopPage();
ASSERT(top_page->is_valid());
// OldSpace implementation
void OldSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call prepare of the super class.
+ PagedSpace::PrepareForMarkCompact(will_compact);
+
if (will_compact) {
// Reset relocation info. During a compacting collection, everything in
// the space is considered 'available' and we will rediscover live data
}
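+// Unlinks the pages from prev (exclusive) to last (inclusive), moves them
+// to the end of the page list, and clears their remembered sets.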
+void PagedSpace::FreePages(Page* prev, Page* last) {
+ if (last == AllocationTopPage()) {
+ // Pages are already at the end of used pages.
+ return;
+ }
+
+ Page* first = NULL;
+
+ // Remove pages from the list.
+ if (prev == NULL) {
+ first = first_page_;
+ first_page_ = last->next_page();
+ } else {
+ first = prev->next_page();
+ MemoryAllocator::SetNextPage(prev, last->next_page());
+ }
+
+ // Attach it after the last page.
+ MemoryAllocator::SetNextPage(last_page_, first);
+ last_page_ = last;
+ MemoryAllocator::SetNextPage(last, NULL);
+
+ // Clean them up.
+ do {
+ first->ClearRSet();
+ first = first->next_page();
+ } while (first != NULL);
+
+ // The order of pages in this space might no longer be consistent with
+ // the order of pages in chunks.
+ page_list_is_chunk_ordered_ = false;
+}
+
+
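+// Both OldSpace::PrepareForMarkCompact and FixedSpace::PrepareForMarkCompact
+// call this base implementation before resetting their relocation info.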
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ // The mark-compact collector relies on the WAS_IN_USE_BEFORE_MC page
+ // flag to skip unused pages. Update the flag value for all pages in
+ // the space.
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (it.has_next()) {
+ Page* p = it.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+ // We passed the page containing the allocation top. All
+ // subsequent pages are unused.
+ in_use = false;
+ }
+ }
+
+ if (!page_list_is_chunk_ordered_) {
+ Page* new_last_in_use = NULL;
+ MemoryAllocator::RelinkPageListInChunkOrder(this,
+ &first_page_,
+ &last_page_,
+ &new_last_in_use);
+ ASSERT(new_last_in_use != NULL);
+
+ if (new_last_in_use != last_in_use) {
+ // The current allocation top points to a page which is now in the
+ // middle of the page list. We should move the allocation top forward
+ // to the new last used page so that object iterators continue to work
+ // properly.
+
+ int size_in_bytes =
+ static_cast<int>(last_in_use->ObjectAreaEnd() -
+ last_in_use->AllocationTop());
+
+ if (size_in_bytes > 0) {
+ // There is still some space left on this page. Create a fake
+ // object which will occupy all free space on this page.
+ // Otherwise iterators would not be able to scan this page
+ // correctly.
+
+ FreeListNode* node =
+ FreeListNode::FromAddress(last_in_use->AllocationTop());
+ node->set_size(last_in_use->ObjectAreaEnd() -
+ last_in_use->AllocationTop());
+ }
+
+ // The new last-in-use page was in the middle of the list before
+ // relinking, so it is full.
+ SetTop(new_last_in_use->AllocationTop(),
+ new_last_in_use->AllocationTop());
+
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ }
+
+ page_list_is_chunk_ordered_ = true;
+ }
+ }
+}
+
+
bool PagedSpace::ReserveSpace(int bytes) {
Address limit = allocation_info_.limit;
Address top = allocation_info_.top;
// FixedSpace implementation
void FixedSpace::PrepareForMarkCompact(bool will_compact) {
+ // Call prepare of the super class.
+ PagedSpace::PrepareForMarkCompact(will_compact);
+
if (will_compact) {
// Reset relocation info.
MCResetRelocationInfo();
// large object page. If the chunk_size happened to be written there, its
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
- page->is_normal_page &= ~0x1;
+ page->SetIsLargeObjectPage(true);
page->ClearRSet();
int extra_bytes = requested_size - object_size;
if (extra_bytes > 0) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
}
+ // True if this page was in use before the current compaction started.
+ // Result is valid only for pages owned by paged spaces and
+ // only after PagedSpace::PrepareForMarkCompact was called.
+ inline bool WasInUseBeforeMC();
+
+ inline void SetWasInUseBeforeMC(bool was_in_use);
+
// True if this page is a large object page.
- bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
+ inline bool IsLargeObjectPage();
+
+ inline void SetIsLargeObjectPage(bool is_large_object_page);
// Returns the offset of a given address to this page.
INLINE(int Offset(Address a)) {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
+ enum PageFlag {
+ IS_NORMAL_PAGE = 1 << 0,
+ WAS_IN_USE_BEFORE_MC = 1 << 1
+ };
+
+ inline bool GetPageFlag(PageFlag flag);
+ inline void SetPageFlag(PageFlag flag, bool value);
+
//---------------------------------------------------------------------------
// Page header description.
//
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
- int is_normal_page;
+ // For normal pages this word is used to store various page flags.
+ int flags;
// The following fields may overlap with remembered set, they can only
// be used in the mark-compact collector when remembered set is not
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
+//
+// The fact that pages for paged spaces are allocated and deallocated in
+// chunks induces a constraint on the order of pages in a linked list. We say
+// that pages are linked in chunk-order if and only if every two consecutive
+// pages from the same chunk are consecutive in the linked list.
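+//
+// For example, if pages A1 and A2 come from one chunk and B1 and B2 from
+// another, the lists A1->A2->B1->B2 and B1->B2->A1->A2 are in chunk-order,
+// while A1->B1->A2->B2 is not.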
+//
+
class MemoryAllocator : public AllStatic {
public:
static Page* AllocatePages(int requested_pages, int* allocated_pages,
PagedSpace* owner);
- // Frees pages from a given page and after. If 'p' is the first page
- // of a chunk, pages from 'p' are freed and this function returns an
- // invalid page pointer. Otherwise, the function searches a page
- // after 'p' that is the first page of a chunk. Pages after the
- // found page are freed and the function returns 'p'.
+ // Frees pages from a given page and after. Requires pages to be
+ // linked in chunk-order (see comment for class).
+ // If 'p' is the first page of a chunk, pages from 'p' are freed
+ // and this function returns an invalid page pointer.
+ // Otherwise, the function searches for a page after 'p' that is
+ // the first page of a chunk. Pages after the found page
+ // are freed and the function returns 'p'.
static Page* FreePages(Page* p);
+ // Frees all pages owned by the given space.
+ static void FreeAllPages(PagedSpace* space);
+
// Allocates and frees raw memory of certain size.
// These are just thin wrappers around OS::Allocate and OS::Free,
// but keep track of allocated bytes as part of heap.
static Page* FindFirstPageInSameChunk(Page* p);
static Page* FindLastPageInSameChunk(Page* p);
+ // Relinks the list of pages owned by the space to make it chunk-ordered.
+ // Returns the new first and last pages of the space.
+ // Also returns the last page in the relinked list which has the
+ // WasInUseBeforeMC flag set.
+ static void RelinkPageListInChunkOrder(PagedSpace* space,
+ Page** first_page,
+ Page** last_page,
+ Page** last_page_in_use);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect a block of memory by marking it read-only/writable.
static inline void Protect(Address start, size_t size);
// used as a marking stack and its page headers are destroyed.
static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
PagedSpace* owner);
+
+ static Page* RelinkPagesInChunk(int chunk_id,
+ Address chunk_start,
+ int chunk_size,
+ Page* prev,
+ Page** last_page_in_use);
};
void ClearRSet();
// Prepares for a mark-compact GC.
- virtual void PrepareForMarkCompact(bool will_compact) = 0;
+ virtual void PrepareForMarkCompact(bool will_compact);
virtual Address PageAllocationTop(Page* page) = 0;
// Used by ReserveSpace.
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
+ // Free all pages in the range from prev (exclusive) to last (inclusive).
+ // Freed pages are moved to the end of the page list.
+ void FreePages(Page* prev, Page* last);
+
+ // Set space allocation info.
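+ // SweepSpace uses this to move the allocation top backwards over a
+ // trailing free area; PrepareForMarkCompact uses it after relinking
+ // the page list.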
+ void SetTop(Address top, Address limit) {
+ allocation_info_.top = top;
+ allocation_info_.limit = limit;
+ }
+
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
static void ResetCodeStatistics();
#endif
+ // Returns the page of the allocation pointer.
+ Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+
protected:
// Maximum capacity of this space.
int max_capacity_;
// Expand and Shrink.
Page* last_page_;
+ // True if pages owned by this space are linked in chunk-order.
+ // See comment for class MemoryAllocator for definition of chunk-order.
+ bool page_list_is_chunk_ordered_;
+
// Normal allocation information.
AllocationInfo allocation_info_;
void DoPrintRSet(const char* space_name);
#endif
private:
- // Returns the page of the allocation pointer.
- Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
// Returns a pointer to the page of the relocation pointer.
Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
// Give a block of memory to the space's free list. It might be added to
// the free list or accounted as waste.
- void Free(Address start, int size_in_bytes) {
- int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ // If add_to_freelist is false, only the accounting stats are updated and
+ // no attempt is made to add the area to the free list.
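+ // For example, the sweeper passes add_to_freelist == false for areas on
+ // pages that are about to be unlinked and for a trailing free area that
+ // is absorbed by moving the allocation top.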
+ void Free(Address start, int size_in_bytes, bool add_to_freelist) {
accounting_stats_.DeallocateBytes(size_in_bytes);
- accounting_stats_.WasteBytes(wasted_bytes);
+
+ if (add_to_freelist) {
+ int wasted_bytes = free_list_.Free(start, size_in_bytes);
+ accounting_stats_.WasteBytes(wasted_bytes);
+ }
}
// Prepare for full garbage collection. Resets the relocation pointer and
int object_size_in_bytes() { return object_size_in_bytes_; }
// Give a fixed sized block of memory to the space's free list.
- void Free(Address start) {
- free_list_.Free(start);
+ // If add_to_freelist is false, only the accounting stats are updated and
+ // no attempt is made to add the area to the free list.
+ void Free(Address start, bool add_to_freelist) {
+ if (add_to_freelist) {
+ free_list_.Free(start);
+ }
accounting_stats_.DeallocateBytes(object_size_in_bytes_);
}
}
CHECK(bytes_to_page > FixedArray::kHeaderSize);
- int* is_normal_page_ptr = &Page::FromAddress(next_page)->is_normal_page;
- Address is_normal_page_addr = reinterpret_cast<Address>(is_normal_page_ptr);
+ int* flags_ptr = &Page::FromAddress(next_page)->flags;
+ Address flags_addr = reinterpret_cast<Address>(flags_ptr);
int bytes_to_allocate =
- static_cast<int>(is_normal_page_addr - current_top) + kPointerSize;
+ static_cast<int>(flags_addr - current_top) + kPointerSize;
int n_elements = (bytes_to_allocate - FixedArray::kHeaderSize) /
kPointerSize;
Heap::AllocateFixedArray(n_elements));
int index = n_elements - 1;
- CHECK_EQ(is_normal_page_ptr,
+ CHECK_EQ(flags_ptr,
HeapObject::RawField(array, FixedArray::OffsetOfElementAt(index)));
array->set(index, Smi::FromInt(0));
// This could have turned the next page into a LargeObjectPage:
CHECK(p->is_valid());
p->opaque_header = 0;
- p->is_normal_page = 0x1;
+ p->SetIsLargeObjectPage(false);
CHECK(!p->next_page()->is_valid());
CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);