From 7a0a0b8b85e4cdf06795ffea01855b345776b932 Mon Sep 17 00:00:00 2001 From: mlippautz Date: Thu, 17 Sep 2015 00:58:18 -0700 Subject: [PATCH] Revert of [heap] Introduce parallel compaction algorithm. (patchset #9 id:160001 of https://codereview.chromium.org/1343333002/ ) Reason for revert: Check failed: https://chromegw.corp.google.com/i/client.v8/builders/V8%20Win64/builds/5535/steps/Check%20%28flakes%29/logs/IndependentWeakHandle Original issue's description: > [heap] Introduce parallel compaction algorithm. > > - The number of parallel tasks is still 1, i.e., we only compact on the main > thread. > - Remove emergency memory (PagedSpace, and CodeRange) > - Introduce partial compaction of pages. > - Logic for multiple tasks is in place. > > BUG=chromium:524425 > LOG=N > > Committed: https://crrev.com/61ea4f55616d3f7bc2ce049a678f16f7475e03e0 > Cr-Commit-Position: refs/heads/master@{#30787} TBR=hpayer@chromium.org NOPRESUBMIT=true NOTREECHECKS=true NOTRY=true BUG=chromium:524425 Review URL: https://codereview.chromium.org/1347873003 Cr-Commit-Position: refs/heads/master@{#30788} --- src/heap/mark-compact.cc | 219 ++++++++++++++++++----------------------------- src/heap/mark-compact.h | 18 ++-- src/heap/spaces.cc | 107 ++++++++++++++++++----- src/heap/spaces.h | 69 +++++---------- 4 files changed, 195 insertions(+), 218 deletions(-) diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index c88160e..ad2ef36 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -57,8 +57,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap) sweeping_in_progress_(false), parallel_compaction_in_progress_(false), pending_sweeper_jobs_semaphore_(0), - pending_compaction_tasks_semaphore_(0), - concurrent_compaction_tasks_active_(0), + pending_compaction_jobs_semaphore_(0), evacuation_(false), slots_buffer_allocator_(nullptr), migration_slots_buffer_(nullptr), @@ -475,21 +474,21 @@ void MarkCompactCollector::ClearMarkbits() { class MarkCompactCollector::CompactionTask : public v8::Task { public: - explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) - : heap_(heap), spaces_(spaces) {} + explicit CompactionTask(Heap* heap) : heap_(heap) {} virtual ~CompactionTask() {} private: // v8::Task overrides. void Run() override { - heap_->mark_compact_collector()->EvacuatePages(spaces_); + // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be + // called by one thread concurrently. + heap_->mark_compact_collector()->EvacuatePages(); heap_->mark_compact_collector() - ->pending_compaction_tasks_semaphore_.Signal(); + ->pending_compaction_jobs_semaphore_.Signal(); } Heap* heap_; - CompactionSpaceCollection* spaces_; DISALLOW_COPY_AND_ASSIGN(CompactionTask); }; @@ -3352,10 +3351,11 @@ void MarkCompactCollector::EvacuateNewSpace() { } -bool MarkCompactCollector::EvacuateLiveObjectsFromPage( +void MarkCompactCollector::EvacuateLiveObjectsFromPage( Page* p, PagedSpace* target_space) { AlwaysAllocateScope always_allocate(isolate()); DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); + p->SetWasSwept(); int offsets[16]; @@ -3376,8 +3376,17 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage( HeapObject* target_object = nullptr; AllocationResult allocation = target_space->AllocateRaw(size, alignment); if (!allocation.To(&target_object)) { - return false; + // If allocation failed, use emergency memory and re-try allocation. 
+ CHECK(target_space->HasEmergencyMemory()); + target_space->UseEmergencyMemory(); + allocation = target_space->AllocateRaw(size, alignment); + } + if (!allocation.To(&target_object)) { + // OS refused to give us memory. + V8::FatalProcessOutOfMemory("Evacuation"); + return; } + MigrateObject(target_object, object, size, target_space->identity()); DCHECK(object->map_word().IsForwardingAddress()); } @@ -3386,142 +3395,80 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage( *cell = 0; } p->ResetLiveBytes(); - return true; } void MarkCompactCollector::EvacuatePagesInParallel() { - if (evacuation_candidates_.length() == 0) return; - - int num_tasks = 1; - if (FLAG_parallel_compaction) { - num_tasks = NumberOfParallelCompactionTasks(); - } - - // Set up compaction spaces. - CompactionSpaceCollection** compaction_spaces_for_tasks = - new CompactionSpaceCollection*[num_tasks]; - for (int i = 0; i < num_tasks; i++) { - compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); - } - - compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory( - heap()->old_space()); - compaction_spaces_for_tasks[0] - ->Get(CODE_SPACE) - ->MoveOverFreeMemory(heap()->code_space()); - parallel_compaction_in_progress_ = true; - // Kick off parallel tasks. - for (int i = 1; i < num_tasks; i++) { - concurrent_compaction_tasks_active_++; - V8::GetCurrentPlatform()->CallOnBackgroundThread( - new CompactionTask(heap(), compaction_spaces_for_tasks[i]), - v8::Platform::kShortRunningTask); - } - - // Contribute in main thread. Counter and signal are in principal not needed. - concurrent_compaction_tasks_active_++; - EvacuatePages(compaction_spaces_for_tasks[0]); - pending_compaction_tasks_semaphore_.Signal(); - - WaitUntilCompactionCompleted(); - - // Merge back memory (compacted and unused) from compaction spaces. - for (int i = 0; i < num_tasks; i++) { - heap()->old_space()->MergeCompactionSpace( - compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); - heap()->code_space()->MergeCompactionSpace( - compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); - delete compaction_spaces_for_tasks[i]; - } - delete[] compaction_spaces_for_tasks; - - // Finalize sequentially. - const int num_pages = evacuation_candidates_.length(); - int abandoned_pages = 0; - for (int i = 0; i < num_pages; i++) { - Page* p = evacuation_candidates_[i]; - switch (p->parallel_compaction_state().Value()) { - case MemoryChunk::ParallelCompactingState::kCompactingAborted: - // We have partially compacted the page, i.e., some objects may have - // moved, others are still in place. - // We need to: - // - Leave the evacuation candidate flag for later processing of - // slots buffer entries. - // - Leave the slots buffer there for processing of entries added by - // the write barrier. - // - Rescan the page as slot recording in the migration buffer only - // happens upon moving (which we potentially didn't do). - // - Leave the page in the list of pages of a space since we could not - // fully evacuate it. - DCHECK(p->IsEvacuationCandidate()); - p->SetFlag(Page::RESCAN_ON_EVACUATION); - abandoned_pages++; - break; - case MemoryChunk::kCompactingFinalize: - DCHECK(p->IsEvacuationCandidate()); - p->SetWasSwept(); - p->Unlink(); - break; - case MemoryChunk::kCompactingDone: - DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); - DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); - break; - default: - // We should not observe kCompactingInProgress, or kCompactingDone. 
-        UNREACHABLE();
-    }
-    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
-  }
-  if (num_pages > 0) {
-    if (FLAG_trace_fragmentation) {
-      if (abandoned_pages != 0) {
-        PrintF(
-            "  Abandoned (at least partially) %d out of %d page compactions due"
-            " to lack of memory\n",
-            abandoned_pages, num_pages);
-      } else {
-        PrintF("  Compacted %d pages\n", num_pages);
-      }
-    }
-  }
+  V8::GetCurrentPlatform()->CallOnBackgroundThread(
+      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
 }
 
 
 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  while (concurrent_compaction_tasks_active_-- > 0) {
-    pending_compaction_tasks_semaphore_.Wait();
-  }
+  pending_compaction_jobs_semaphore_.Wait();
   parallel_compaction_in_progress_ = false;
 }
 
 
-void MarkCompactCollector::EvacuatePages(
-    CompactionSpaceCollection* compaction_spaces) {
-  for (int i = 0; i < evacuation_candidates_.length(); i++) {
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
+  for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
            MemoryChunk::SWEEPING_DONE);
-    if (p->parallel_compaction_state().TrySetValue(
-            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
-      if (p->IsEvacuationCandidate()) {
-        DCHECK_EQ(p->parallel_compaction_state().Value(),
-                  MemoryChunk::kCompactingInProgress);
-        if (EvacuateLiveObjectsFromPage(
-                p, compaction_spaces->Get(p->owner()->identity()))) {
-          p->parallel_compaction_state().SetValue(
-              MemoryChunk::kCompactingFinalize);
-        } else {
-          p->parallel_compaction_state().SetValue(
-              MemoryChunk::kCompactingAborted);
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    // Allocate emergency memory for the case when compaction fails due to out
+    // of memory.
+    if (!space->HasEmergencyMemory()) {
+      space->CreateEmergencyMemory();  // If the OS lets us.
+    }
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page in order to free
+      // up a page. Check that we actually got an emergency page above so we
+      // can guarantee that this succeeds.
+      if (space->HasEmergencyMemory()) {
+        EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
+        // Unlink the page from the list of pages here. We must not iterate
+        // over that page later (e.g. when scan on scavenge pages are
+        // processed). The page itself will be freed later and is still
+        // reachable from the evacuation candidates list.
+        p->Unlink();
+      } else {
+        // Without room for expansion evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_->DeallocateChain(
+              page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
         }
+        abandoned_pages = npages - i;
+        break;
+      }
+    }
+  }
+  if (npages > 0) {
+    // Release emergency memory.
+    PagedSpaces spaces(heap());
+    for (PagedSpace* space = spaces.next(); space != NULL;
+         space = spaces.next()) {
+      if (space->HasEmergencyMemory()) {
+        space->FreeEmergencyMemory();
+      }
+    }
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            "  Abandon %d out of %d page defragmentations due to lack of "
+            "memory\n",
+            abandoned_pages, npages);
       } else {
-        // There could be popular pages in the list of evacuation candidates
-        // which we do compact.
-        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+        PrintF("  Defragmented %d pages\n", npages);
       }
     }
   }
@@ -3710,7 +3657,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuationScope evacuation_scope(this);
-    EvacuatePagesInParallel();
+    if (FLAG_parallel_compaction) {
+      EvacuatePagesInParallel();
+      WaitUntilCompactionCompleted();
+    } else {
+      EvacuatePages();
+    }
   }
 
   // Second pass: find pointers to new space and update them.
@@ -3770,15 +3722,13 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }
-      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
 
        // Important: skip list should be cleared only after roots were updated
        // because root iteration traverses the stack and might have to find
        // code objects from non-updated pc pointing into evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
-      }
-      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
@@ -3808,12 +3758,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
            break;
        }
      }
-      if (p->IsEvacuationCandidate() &&
-          p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-        // Case where we've aborted compacting a page. Clear the flag here to
-        // avoid release the page later on.
-        p->ClearEvacuationCandidate();
-      }
     }
   }
 
@@ -3860,6 +3804,7 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
     space->ReleasePage(p);
   }
@@ -4475,6 +4420,10 @@ void MarkCompactCollector::SweepSpaces() {
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
 
+  CodeRange* code_range = heap()->isolate()->code_range();
+  if (code_range != NULL && code_range->valid()) {
+    code_range->ReserveEmergencyBlock();
+  }
 
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 714ad16..e0b7c38 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -553,11 +553,8 @@ class MarkCompactCollector {
   // Synchronize sweeper threads.
   base::Semaphore pending_sweeper_jobs_semaphore_;
 
-  // Synchronize compaction tasks.
-  base::Semaphore pending_compaction_tasks_semaphore_;
-
-  // Number of active compaction tasks (including main thread).
-  intptr_t concurrent_compaction_tasks_active_;
+  // Synchronize compaction threads.
+  base::Semaphore pending_compaction_jobs_semaphore_;
 
   bool evacuation_;
 
@@ -715,16 +712,11 @@ class MarkCompactCollector {
 
   void EvacuateNewSpace();
 
-  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
+  void EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
 
-  void EvacuatePages(CompactionSpaceCollection* compaction_spaces);
-  void EvacuatePagesInParallel();
+  void EvacuatePages();
 
-  int NumberOfParallelCompactionTasks() {
-    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
-    // of compaction tasks.
-    return 1;
-  }
+  void EvacuatePagesInParallel();
 
   void WaitUntilCompactionCompleted();
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 4e15117..b1c9557 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -80,7 +80,8 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -139,6 +140,7 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;
 
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  ReserveEmergencyBlock();
   return true;
 }
 
@@ -274,6 +276,24 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
 }
 
 
+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
@@ -472,7 +492,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->set_parallel_sweeping(SWEEPING_DONE);
-  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
@@ -955,7 +974,8 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       free_list_(this),
       unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL) {
+      end_of_unswept_pages_(NULL),
+      emergency_memory_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
@@ -983,37 +1003,30 @@ void PagedSpace::TearDown() {
 }
 
 
-void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
-  DCHECK(identity() == other->identity());
-  // Destroy the linear allocation space of {other}. This is needed to
-  // (a) not waste the memory and
-  // (b) keep the rest of the chunk in an iterable state (filler is needed).
-  other->EmptyAllocationInfo();
-
-  // Move over the free list. Concatenate makes sure that the source free list
-  // gets properly reset after moving over all nodes.
-  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
-  other->accounting_stats_.AllocateBytes(freed_bytes);
-  // We do not adjust accounting_stats_ for {this} as we treat the received
-  // memory as borrowed, i.e., the originating space keeps track of its
-  // capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly
-  // maintained by allocating and freeing blocks.
-}
-
-
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
   //   allocation_info_
+  //   emergency_memory_
   //   end_of_unswept_pages_
   //   unswept_free_bytes_
   //   anchor_
 
-  MoveOverFreeMemory(other);
+  // It only makes sense to merge compatible spaces.
+  DCHECK(identity() == other->identity());
+
+  // Destroy the linear allocation space of {other}. This is needed to (a) not
+  // waste the memory and (b) keep the rest of the chunk in an iterable state
+  // (filler is needed).
+  int linear_size = static_cast<int>(other->limit() - other->top());
+  other->Free(other->top(), linear_size);
+
+  // Move over the free list.
+  free_list_.Concatenate(other->free_list());
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();
 
   // Move over pages.
   PageIterator it(other);
@@ -1097,6 +1110,9 @@ bool PagedSpace::Expand() {
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
 
   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
+  DCHECK(heap()->CommittedOldGenerationMemory() <=
+         heap()->MaxOldGenerationSize() +
+             PagedSpace::MaxEmergencyMemoryAllocated());
 
   p->InsertAfter(anchor_.prev_page());
 
@@ -1166,6 +1182,51 @@ void PagedSpace::ReleasePage(Page* page) {
 }
 
 
+intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
+  // New space and large object space.
+  static const int spaces_without_emergency_memory = 2;
+  static const int spaces_with_emergency_memory =
+      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
+  return Page::kPageSize * spaces_with_emergency_memory;
+}
+
+
+void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    CodeRange* code_range = heap()->isolate()->code_range();
+    if (code_range != NULL && code_range->valid()) {
+      code_range->ReleaseEmergencyBlock();
+    }
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
+  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
+      AreaSize(), AreaSize(), executable(), this);
+}
+
+
+void PagedSpace::FreeEmergencyMemory() {
+  Page* page = static_cast<Page*>(emergency_memory_);
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+  heap()->isolate()->memory_allocator()->Free(page);
+  emergency_memory_ = NULL;
+}
+
+
+void PagedSpace::UseEmergencyMemory() {
+  // Page::Initialize makes the chunk into a real page and adds it to the
+  // accounting for this space. Unlike PagedSpace::Expand, we don't check
+  // CanExpand first, so we can go over the limits a little here. That's OK,
+  // because we are in the process of compacting which will free up at least as
+  // much memory as it allocates.
+  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
+  page->InsertAfter(anchor_.prev_page());
+  emergency_memory_ = NULL;
+}
+
+
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 4bb6222..e7a0334 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -268,19 +268,6 @@ class SlotsBuffer;
 // any heap object.
 class MemoryChunk {
  public:
-  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|: Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //   be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //   for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -471,10 +458,6 @@ class MemoryChunk {
     base::Release_Store(&parallel_sweeping_, state);
   }
 
-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   bool TryLock() { return mutex_->TryLock(); }
 
   base::Mutex* mutex() { return mutex_; }
@@ -583,7 +566,6 @@ class MemoryChunk {
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + kPointerSize      // AtomicValue parallel_compaction_
       + 5 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // base::AtomicWord next_chunk_
       + kPointerSize;     // base::AtomicWord prev_chunk_
@@ -744,7 +726,6 @@ class MemoryChunk {
 
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -1005,6 +986,9 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
+  void ReserveEmergencyBlock();
+  void ReleaseEmergencyBlock();
+
  private:
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -1047,6 +1031,12 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
+  // Emergency block guarantees that we can always allocate a page for
+  // evacuation candidates when code space is compacted. Emergency block is
+  // reserved immediately after GC and is released immedietely before
+  // allocating a page for evacuation.
+  FreeBlock emergency_block_;
+
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory. If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -1979,12 +1969,17 @@ class PagedSpace : public Space {
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }
 
+  void CreateEmergencyMemory();
+  void FreeEmergencyMemory();
+  void UseEmergencyMemory();
+  intptr_t MaxEmergencyMemoryAllocated();
+
+  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);
 
-  void MoveOverFreeMemory(PagedSpace* other);
-
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2045,6 +2040,12 @@ class PagedSpace : public Space {
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;
 
+  // Emergency memory is the memory of a full page for a given space, allocated
+  // conservatively before evacuating a page. If compaction fails due to out
+  // of memory error the emergency memory can be used to complete compaction.
+  // If not used, the emergency memory is released after compaction.
+  MemoryChunk* emergency_memory_;
+
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
@@ -2744,32 +2745,6 @@ class CompactionSpace : public PagedSpace {
 };
 
 
-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced { - public: - explicit CompactionSpaceCollection(Heap* heap) - : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE), - code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {} - - CompactionSpace* Get(AllocationSpace space) { - switch (space) { - case OLD_SPACE: - return &old_space_; - case CODE_SPACE: - return &code_space_; - default: - UNREACHABLE(); - } - UNREACHABLE(); - return nullptr; - } - - private: - CompactionSpace old_space_; - CompactionSpace code_space_; -}; - - // ----------------------------------------------------------------------------- // Old object space (includes the old space of objects and code space) -- 2.7.4
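For readers skimming the revert: the restored code path relies on the pre-existing emergency-memory scheme (PagedSpace::CreateEmergencyMemory / UseEmergencyMemory / FreeEmergencyMemory plus the CodeRange emergency block). The sketch below is a self-contained toy approximation of that control flow, not V8 code; ToySpace, ToyPage, TryAllocatePage and the page counts are invented for illustration. It reserves a spare page before evacuating, consumes the spare and retries once when an allocation fails mid-evacuation, abandons the remaining candidates when no spare could be reserved, and releases unused spares afterwards.

// toy_emergency_compaction.cc -- builds standalone with any C++17 compiler.
#include <cstddef>
#include <cstdio>
#include <vector>

// A fake page; only records whether the evacuation loop handled it.
struct ToyPage {
  bool evacuated = false;
  bool abandoned = false;
};

// A fake space with a deliberately tiny free-page budget so the emergency
// path is actually exercised.
class ToySpace {
 public:
  void CreateEmergencyMemory() { emergency_reserved_ = true; }  // pretend the OS obliged
  bool HasEmergencyMemory() const { return emergency_reserved_; }
  void UseEmergencyMemory() {
    // Hand the reserved page to the allocator and drop the reservation.
    free_pages_ += 1;
    emergency_reserved_ = false;
  }
  void FreeEmergencyMemory() { emergency_reserved_ = false; }

  // Stand-in for AllocateRaw(): succeeds only while free pages remain.
  bool TryAllocatePage() {
    if (free_pages_ == 0) return false;
    --free_pages_;
    return true;
  }

 private:
  int free_pages_ = 1;
  bool emergency_reserved_ = false;
};

// Shaped like the sequential EvacuatePages() that this revert restores:
// reserve a spare page up front, fall back to it when an allocation fails
// mid-evacuation, abandon the rest when no spare exists, release unused
// spares afterwards.
void EvacuatePages(ToySpace* space, std::vector<ToyPage>* candidates) {
  int abandoned = 0;
  for (std::size_t i = 0; i < candidates->size(); ++i) {
    if (!space->HasEmergencyMemory()) space->CreateEmergencyMemory();
    if (!space->HasEmergencyMemory()) {
      // No spare page: pessimistically abandon the remaining candidates.
      for (std::size_t j = i; j < candidates->size(); ++j) {
        (*candidates)[j].abandoned = true;
        ++abandoned;
      }
      break;
    }
    if (!space->TryAllocatePage()) {
      // Allocation failed mid-evacuation: consume the spare and retry once.
      space->UseEmergencyMemory();
      if (!space->TryAllocatePage()) {
        std::fprintf(stderr, "out of memory during evacuation\n");
        return;  // the real collector calls V8::FatalProcessOutOfMemory here
      }
    }
    (*candidates)[i].evacuated = true;
  }
  if (space->HasEmergencyMemory()) space->FreeEmergencyMemory();
  std::printf("abandoned %d of %zu pages\n", abandoned, candidates->size());
}

int main() {
  ToySpace space;
  std::vector<ToyPage> candidates(3);
  EvacuatePages(&space, &candidates);
  return 0;
}

The reservation is done up front because compaction may need a fresh page exactly when the space is exhausted; holding a spare turns a mid-evacuation allocation failure into either a retry from the reserved page or a clean abandonment of the remaining candidates, which is the behaviour the reverted parallel algorithm had replaced with per-page kCompactingAborted states.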