// Committing memory to from space failed.
// Try shrinking and try again.
+ // Relink each paged space's pages into chunk order (freeing unused
+ // tails) so the Shrink() below can release memory -- NOTE(review):
+ // confirm this ordering is required before Shrink().
+ PagedSpaces spaces;
+ for (PagedSpace* space = spaces.next();
+ space != NULL;
+ space = spaces.next()) {
+ space->RelinkPageListInChunkOrder(true);
+ }
+
Shrink();
if (new_space_.CommitFromSpaceIfNeeded()) return;
}
-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start,
- static_cast<int>(current - free_start),
- true,
- false);
+ space->DeallocateBlock(free_start,
+ static_cast<int>(current - free_start),
+ true);
is_previous_alive = true;
}
} else {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false, true);
+ space->DeallocateBlock(free_start, size_in_bytes, false);
}
}
} else {
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
- dealloc(last_free_start, last_free_size, true, true);
+ space->DeallocateBlock(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false, true);
+ space->DeallocateBlock(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
}
-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Objects in map space are assumed to have size Map::kSize and a
- // valid map in their first word. Thus, we break the free block up into
- // chunks and free them separately.
- ASSERT(size_in_bytes % Map::kSize == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += Map::kSize) {
- Heap::map_space()->Free(a, add_to_freelist);
- }
-}
-
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Free-list elements in cell space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = Heap::cell_space()->object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Heap::cell_space()->Free(a, add_to_freelist);
- }
-}
-
-
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
- SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
- SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
- SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+ SweepSpace(Heap::old_pointer_space());
+ SweepSpace(Heap::old_data_space());
+ SweepSpace(Heap::code_space());
+ SweepSpace(Heap::cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
- SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ SweepSpace(Heap::map_space());
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-// Callback function for non-live blocks in the old generation.
-// If add_to_freelist is false then just accounting stats are updated and
-// no attempt to add area to free list is made.
-typedef void (*DeallocateFunction)(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
-
// Forward declarations.
class RootMarkingVisitor;
class MarkingVisitor;
static int IterateLiveObjectsInRange(Address start, Address end,
HeapObjectCallback size_func);
- // Callback functions for deallocating non-live blocks in the old
- // generation.
- static void DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
- static void DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page);
-
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
// regions to each space's free list.
}
-void PagedSpace::PrepareForMarkCompact(bool will_compact) {
- if (will_compact) {
- // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
- // to skip unused pages. Update flag value for all pages in space.
- PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
- Page* last_in_use = AllocationTopPage();
- bool in_use = true;
-
- while (all_pages_iterator.has_next()) {
- Page* p = all_pages_iterator.next();
- p->SetWasInUseBeforeMC(in_use);
- if (p == last_in_use) {
- // We passed a page containing allocation top. All consequent
- // pages are not used.
- in_use = false;
+void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) {
+ const bool add_to_freelist = true;
+
+ // Mark used and unused pages to properly fill unused pages
+ // after reordering.
+ PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
+ Page* last_in_use = AllocationTopPage();
+ bool in_use = true;
+
+ while (all_pages_iterator.has_next()) {
+ Page* p = all_pages_iterator.next();
+ p->SetWasInUseBeforeMC(in_use);
+ if (p == last_in_use) {
+ // We passed a page containing allocation top. All subsequent
+ // pages are not used.
+ in_use = false;
+ }
+ }
+
+ if (page_list_is_chunk_ordered_) return;
+
+ Page* new_last_in_use = Page::FromAddress(NULL);
+ MemoryAllocator::RelinkPageListInChunkOrder(this,
+ &first_page_,
+ &last_page_,
+ &new_last_in_use);
+ ASSERT(new_last_in_use->is_valid());
+
+ if (new_last_in_use != last_in_use) {
+ // Current allocation top points to a page which is now in the middle
+ // of page list. We should move allocation top forward to the new last
+ // used page so various object iterators will continue to work properly.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
+ last_in_use->AllocationTop());
+
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
+ if (size_in_bytes > 0) {
+ Address start = last_in_use->AllocationTop();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
}
- if (!page_list_is_chunk_ordered_) {
- Page* new_last_in_use = Page::FromAddress(NULL);
- MemoryAllocator::RelinkPageListInChunkOrder(this,
- &first_page_,
- &last_page_,
- &new_last_in_use);
- ASSERT(new_last_in_use->is_valid());
-
- if (new_last_in_use != last_in_use) {
- // Current allocation top points to a page which is now in the middle
- // of page list. We should move allocation top forward to the new last
- // used page so various object iterators will continue to work properly.
- last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
-
- int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
- last_in_use->AllocationTop());
-
- if (size_in_bytes > 0) {
- // There is still some space left on this page. Create a fake
- // object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page
- // correctly.
-
- Heap::CreateFillerObjectAt(last_in_use->AllocationTop(),
- size_in_bytes);
- }
+ // New last in use page was in the middle of the list before
+ // sorting, so it is full.
+ SetTop(new_last_in_use->AllocationTop());
- // New last in use page was in the middle of the list before
- // sorting so it full.
- SetTop(new_last_in_use->AllocationTop());
+ ASSERT(AllocationTopPage() == new_last_in_use);
+ ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+ }
- ASSERT(AllocationTopPage() == new_last_in_use);
- ASSERT(AllocationTopPage()->WasInUseBeforeMC());
- }
+ PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+ while (pages_in_use_iterator.has_next()) {
+ Page* p = pages_in_use_iterator.next();
+ if (!p->WasInUseBeforeMC()) {
+ // Empty page is in the middle of a sequence of used pages.
+ // Allocate it as a whole and deallocate immediately.
+ int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
+ p->ObjectAreaStart());
- PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
- while (pages_in_use_iterator.has_next()) {
- Page* p = pages_in_use_iterator.next();
- if (!p->WasInUseBeforeMC()) {
- // Empty page is in the middle of a sequence of used pages.
- // Create a fake object which will occupy all free space on this page.
- // Otherwise iterators would not be able to scan this page correctly.
- int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
- p->ObjectAreaStart());
-
- p->SetAllocationWatermark(p->ObjectAreaStart());
- Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
- }
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ Address start = p->ObjectAreaStart();
+ if (deallocate_blocks) {
+ accounting_stats_.AllocateBytes(size_in_bytes);
+ DeallocateBlock(start, size_in_bytes, add_to_freelist);
+ } else {
+ Heap::CreateFillerObjectAt(start, size_in_bytes);
}
-
- page_list_is_chunk_ordered_ = true;
}
}
+
+ page_list_is_chunk_ordered_ = true;
+}
+
+
+void PagedSpace::PrepareForMarkCompact(bool will_compact) {
+ if (will_compact) {
+ RelinkPageListInChunkOrder(false);
+ }
}
}
+// Frees the block [start, start + size_in_bytes) back to this space.
+// If add_to_freelist is false, only accounting stats are updated and
+// the area is not linked into the free list.
+void OldSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ Free(start, size_in_bytes, add_to_freelist);
+}
+
+
#ifdef DEBUG
struct CommentStatistic {
const char* comment;
}
+// Frees the block [start, start + size_in_bytes), splitting it into
+// object_size_in_bytes()-sized chunks. If add_to_freelist is false,
+// only accounting stats are updated.
+void FixedSpace::DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) {
+ // Free-list elements in fixed space are assumed to have a fixed size.
+ // We break the free block into chunks and add them to the free list
+ // individually.
+ int size = object_size_in_bytes();
+ ASSERT(size_in_bytes % size == 0);
+ Address end = start + size_in_bytes;
+ for (Address a = start; a < end; a += size) {
+ Free(a, add_to_freelist);
+ }
+}
+
+
#ifdef DEBUG
void FixedSpace::ReportStatistics() {
int pct = Available() * 100 / Capacity();
// Freed pages are moved to the end of page list.
void FreePages(Page* prev, Page* last);
+ // Deallocates a block. If add_to_freelist is false, only accounting
+ // stats are updated and no attempt to add the area to a free list
+ // is made.
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist) = 0;
+
// Set space allocation info.
void SetTop(Address top) {
allocation_info_.top = top;
// Returns the page of the allocation pointer.
Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
+ void RelinkPageListInChunkOrder(bool deallocate_blocks);
+
protected:
// Maximum capacity of this space.
int max_capacity_;
}
}
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
+
// Prepare for full garbage collection. Resets the relocation pointer and
// clears the free list.
virtual void PrepareForMarkCompact(bool will_compact);
virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+ virtual void DeallocateBlock(Address start,
+ int size_in_bytes,
+ bool add_to_freelist);
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();