sweeping_in_progress_(false),
parallel_compaction_in_progress_(false),
pending_sweeper_jobs_semaphore_(0),
- pending_compaction_tasks_semaphore_(0),
- concurrent_compaction_tasks_active_(0),
+ pending_compaction_jobs_semaphore_(0),
evacuation_(false),
slots_buffer_allocator_(nullptr),
migration_slots_buffer_(nullptr),
class MarkCompactCollector::CompactionTask : public v8::Task {
public:
- explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
- : heap_(heap), spaces_(spaces) {}
+ explicit CompactionTask(Heap* heap) : heap_(heap) {}
virtual ~CompactionTask() {}
private:
// v8::Task overrides.
void Run() override {
- heap_->mark_compact_collector()->EvacuatePages(spaces_);
+ // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and must only
+ // be called by one thread at a time.
+ heap_->mark_compact_collector()->EvacuatePages();
heap_->mark_compact_collector()
- ->pending_compaction_tasks_semaphore_.Signal();
+ ->pending_compaction_jobs_semaphore_.Signal();
}
Heap* heap_;
- CompactionSpaceCollection* spaces_;
DISALLOW_COPY_AND_ASSIGN(CompactionTask);
};
}
-bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(
Page* p, PagedSpace* target_space) {
AlwaysAllocateScope always_allocate(isolate());
DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+ p->SetWasSwept();
int offsets[16];
HeapObject* target_object = nullptr;
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
if (!allocation.To(&target_object)) {
- return false;
+ // If allocation failed, use emergency memory and retry the allocation.
+ CHECK(target_space->HasEmergencyMemory());
+ target_space->UseEmergencyMemory();
+ allocation = target_space->AllocateRaw(size, alignment);
+ }
+ if (!allocation.To(&target_object)) {
+ // OS refused to give us memory.
+ V8::FatalProcessOutOfMemory("Evacuation");
+ return;
}
+
MigrateObject(target_object, object, size, target_space->identity());
DCHECK(object->map_word().IsForwardingAddress());
}
*cell = 0;
}
p->ResetLiveBytes();
- return true;
}
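
For orientation, a minimal standalone sketch (not part of this patch) of the fallback protocol restored above: allocate, fall back to the emergency page on failure, and treat a second failure as process OOM. TargetSpace and its counters are stand-ins invented for the example, not V8's classes.

// Sketch of the allocation fallback in EvacuateLiveObjectsFromPage.
#include <cstdio>
#include <cstdlib>

struct TargetSpace {
  int free_pages = 0;     // Allocatable room, in pages.
  bool emergency = true;  // One page reserved before evacuation started.
  bool AllocateRaw() {
    if (free_pages > 0) {
      --free_pages;
      return true;
    }
    return false;
  }
  bool HasEmergencyMemory() const { return emergency; }
  void UseEmergencyMemory() {
    emergency = false;
    ++free_pages;  // The reserved page becomes allocatable.
  }
};

void EvacuateOne(TargetSpace* space) {
  if (space->AllocateRaw()) return;
  // First failure: retry from the page reserved up front. The CHECK holds
  // because EvacuatePages only visits a candidate after the reservation
  // succeeded.
  if (!space->HasEmergencyMemory()) std::abort();  // CHECK(...)
  space->UseEmergencyMemory();
  if (space->AllocateRaw()) return;
  // Even the emergency page was not enough: the OS refused us memory.
  std::fprintf(stderr, "Fatal process out of memory: Evacuation\n");
  std::abort();
}
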
void MarkCompactCollector::EvacuatePagesInParallel() {
- if (evacuation_candidates_.length() == 0) return;
-
- int num_tasks = 1;
- if (FLAG_parallel_compaction) {
- num_tasks = NumberOfParallelCompactionTasks();
- }
-
- // Set up compaction spaces.
- CompactionSpaceCollection** compaction_spaces_for_tasks =
- new CompactionSpaceCollection*[num_tasks];
- for (int i = 0; i < num_tasks; i++) {
- compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
- }
-
- compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
- heap()->old_space());
- compaction_spaces_for_tasks[0]
- ->Get(CODE_SPACE)
- ->MoveOverFreeMemory(heap()->code_space());
-
parallel_compaction_in_progress_ = true;
- // Kick off parallel tasks.
- for (int i = 1; i < num_tasks; i++) {
- concurrent_compaction_tasks_active_++;
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
- v8::Platform::kShortRunningTask);
- }
-
- // Contribute in main thread. Counter and signal are in principal not needed.
- concurrent_compaction_tasks_active_++;
- EvacuatePages(compaction_spaces_for_tasks[0]);
- pending_compaction_tasks_semaphore_.Signal();
-
- WaitUntilCompactionCompleted();
-
- // Merge back memory (compacted and unused) from compaction spaces.
- for (int i = 0; i < num_tasks; i++) {
- heap()->old_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
- heap()->code_space()->MergeCompactionSpace(
- compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
- delete compaction_spaces_for_tasks[i];
- }
- delete[] compaction_spaces_for_tasks;
-
- // Finalize sequentially.
- const int num_pages = evacuation_candidates_.length();
- int abandoned_pages = 0;
- for (int i = 0; i < num_pages; i++) {
- Page* p = evacuation_candidates_[i];
- switch (p->parallel_compaction_state().Value()) {
- case MemoryChunk::ParallelCompactingState::kCompactingAborted:
- // We have partially compacted the page, i.e., some objects may have
- // moved, others are still in place.
- // We need to:
- // - Leave the evacuation candidate flag for later processing of
- // slots buffer entries.
- // - Leave the slots buffer there for processing of entries added by
- // the write barrier.
- // - Rescan the page as slot recording in the migration buffer only
- // happens upon moving (which we potentially didn't do).
- // - Leave the page in the list of pages of a space since we could not
- // fully evacuate it.
- DCHECK(p->IsEvacuationCandidate());
- p->SetFlag(Page::RESCAN_ON_EVACUATION);
- abandoned_pages++;
- break;
- case MemoryChunk::kCompactingFinalize:
- DCHECK(p->IsEvacuationCandidate());
- p->SetWasSwept();
- p->Unlink();
- break;
- case MemoryChunk::kCompactingDone:
- DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
- DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- break;
- default:
- // We should not observe kCompactingInProgress, or kCompactingDone.
- UNREACHABLE();
- }
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
- }
- if (num_pages > 0) {
- if (FLAG_trace_fragmentation) {
- if (abandoned_pages != 0) {
- PrintF(
- " Abandoned (at least partially) %d out of %d page compactions due"
- " to lack of memory\n",
- abandoned_pages, num_pages);
- } else {
- PrintF(" Compacted %d pages\n", num_pages);
- }
- }
- }
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new CompactionTask(heap()), v8::Platform::kShortRunningTask);
}
void MarkCompactCollector::WaitUntilCompactionCompleted() {
- while (concurrent_compaction_tasks_active_-- > 0) {
- pending_compaction_tasks_semaphore_.Wait();
- }
+ pending_compaction_jobs_semaphore_.Wait();
parallel_compaction_in_progress_ = false;
}
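
With a single background task, the post/wait pair above reduces to a one-shot semaphore handshake, so no task counter is needed. A standalone sketch of the same pattern (not part of this patch), with std::thread and a condition-variable semaphore standing in for v8::Platform and base::Semaphore:

#include <condition_variable>
#include <mutex>
#include <thread>

class Semaphore {
 public:
  void Signal() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_;
    cv_.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [this] { return count_ > 0; });
    --count_;
  }

 private:
  std::mutex mutex_;
  std::condition_variable cv_;
  int count_ = 0;
};

void EvacuatePages() { /* single-threaded evacuation work */ }

int main() {
  Semaphore pending_compaction_jobs_semaphore;
  // EvacuatePagesInParallel: kick off exactly one background task.
  std::thread task([&] {
    EvacuatePages();  // Must never run on more than one thread at a time.
    pending_compaction_jobs_semaphore.Signal();
  });
  // WaitUntilCompactionCompleted: block until that task signals.
  pending_compaction_jobs_semaphore.Wait();
  task.join();
  return 0;
}
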
-void MarkCompactCollector::EvacuatePages(
- CompactionSpaceCollection* compaction_spaces) {
- for (int i = 0; i < evacuation_candidates_.length(); i++) {
+void MarkCompactCollector::EvacuatePages() {
+ int npages = evacuation_candidates_.length();
+ int abandoned_pages = 0;
+ for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
DCHECK(static_cast<int>(p->parallel_sweeping()) ==
MemoryChunk::SWEEPING_DONE);
- if (p->parallel_compaction_state().TrySetValue(
- MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
- if (p->IsEvacuationCandidate()) {
- DCHECK_EQ(p->parallel_compaction_state().Value(),
- MemoryChunk::kCompactingInProgress);
- if (EvacuateLiveObjectsFromPage(
- p, compaction_spaces->Get(p->owner()->identity()))) {
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingFinalize);
- } else {
- p->parallel_compaction_state().SetValue(
- MemoryChunk::kCompactingAborted);
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ // Allocate emergency memory in case compaction fails due to lack of
+ // memory.
+ if (!space->HasEmergencyMemory()) {
+ space->CreateEmergencyMemory(); // If the OS lets us.
+ }
+ if (p->IsEvacuationCandidate()) {
+ // During compaction we might have to request a new page in order to free
+ // up a page. Check that we actually got an emergency page above so we
+ // can guarantee that this succeeds.
+ if (space->HasEmergencyMemory()) {
+ EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
+ // Unlink the page from the list of pages here. We must not iterate
+ // over that page later (e.g. when scan on scavenge pages are
+ // processed). The page itself will be freed later and is still
+ // reachable from the evacuation candidates list.
+ p->Unlink();
+ } else {
+ // Without room for expansion, evacuation is not guaranteed to succeed.
+ // Pessimistically abandon unevacuated pages.
+ for (int j = i; j < npages; j++) {
+ Page* page = evacuation_candidates_[j];
+ slots_buffer_allocator_->DeallocateChain(
+ page->slots_buffer_address());
+ page->ClearEvacuationCandidate();
+ page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
+ abandoned_pages = npages - i;
+ break;
+ }
+ }
+ }
+ if (npages > 0) {
+ // Release emergency memory.
+ PagedSpaces spaces(heap());
+ for (PagedSpace* space = spaces.next(); space != NULL;
+ space = spaces.next()) {
+ if (space->HasEmergencyMemory()) {
+ space->FreeEmergencyMemory();
+ }
+ }
+ if (FLAG_trace_fragmentation) {
+ if (abandoned_pages != 0) {
+ PrintF(
+ " Abandon %d out of %d page defragmentations due to lack of "
+ "memory\n",
+ abandoned_pages, npages);
} else {
- // There could be popular pages in the list of evacuation candidates
- // which we do compact.
- p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ PrintF(" Defragmented %d pages\n", npages);
}
}
}
GCTracer::Scope gc_scope(heap()->tracer(),
GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuationScope evacuation_scope(this);
- EvacuatePagesInParallel();
+ if (FLAG_parallel_compaction) {
+ EvacuatePagesInParallel();
+ WaitUntilCompactionCompleted();
+ } else {
+ EvacuatePages();
+ }
}
// Second pass: find pointers to new space and update them.
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
- slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
- }
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+ } else {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
reinterpret_cast<intptr_t>(p));
break;
}
}
- if (p->IsEvacuationCandidate() &&
- p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- // Case where we've aborted compacting a page. Clear the flag here to
- // avoid release the page later on.
- p->ClearEvacuationCandidate();
- }
}
}
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
+ slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
space->ReleasePage(p);
}
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+ CodeRange* code_range = heap()->isolate()->code_range();
+ if (code_range != NULL && code_range->valid()) {
+ code_range->ReserveEmergencyBlock();
+ }
if (FLAG_print_cumulative_gc_stat) {
heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
code_range_(NULL),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0) {}
+ current_allocation_block_index_(0),
+ emergency_block_() {}
bool CodeRange::SetUp(size_t requested) {
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+ ReserveEmergencyBlock();
return true;
}
}
+void CodeRange::ReserveEmergencyBlock() {
+ const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+ if (emergency_block_.size == 0) {
+ ReserveBlock(requested_size, &emergency_block_);
+ } else {
+ DCHECK(emergency_block_.size >= requested_size);
+ }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+ if (emergency_block_.size != 0) {
+ ReleaseBlock(&emergency_block_);
+ emergency_block_.size = 0;
+ }
+}
+
+
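
The pair above is driven from two places: ReserveEmergencyBlock() runs in CodeRange::SetUp and again after evacuation candidates are released, while ReleaseEmergencyBlock() runs from PagedSpace::CreateEmergencyMemory() just before the code space allocates its spare chunk. A standalone sketch of that cycle (not part of this patch; the structs are stand-ins, not the real CodeRange/PagedSpace):

#include <cstddef>

struct CodeRangeSketch {
  size_t emergency_block_size = 0;
  void ReserveEmergencyBlock(size_t area) {
    if (emergency_block_size == 0) emergency_block_size = area;
  }
  void ReleaseEmergencyBlock() { emergency_block_size = 0; }
};

struct CodeSpaceSketch {
  bool has_emergency_chunk = false;
  void CreateEmergencyMemory(CodeRangeSketch* range) {
    // Hand the reserved block back first so the chunk allocation below can
    // draw from the code range (the CODE_SPACE branch of
    // PagedSpace::CreateEmergencyMemory).
    range->ReleaseEmergencyBlock();
    has_emergency_chunk = true;  // Stands in for AllocateChunk(AreaSize(), ...).
  }
};

int main() {
  const size_t kCodePageArea = 64 * 1024;  // Illustrative size only.
  CodeRangeSketch range;
  CodeSpaceSketch code_space;
  range.ReserveEmergencyBlock(kCodePageArea);  // CodeRange::SetUp.
  code_space.CreateEmergencyMemory(&range);    // Start of compaction.
  // ... evacuation; UseEmergencyMemory() on allocation failure ...
  range.ReserveEmergencyBlock(kCodePageArea);  // After candidates are released.
  return 0;
}
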
// -----------------------------------------------------------------------------
// MemoryAllocator
//
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
chunk->set_parallel_sweeping(SWEEPING_DONE);
- chunk->parallel_compaction_state().SetValue(kCompactingDone);
chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
: Space(heap, space, executable),
free_list_(this),
unswept_free_bytes_(0),
- end_of_unswept_pages_(NULL) {
+ end_of_unswept_pages_(NULL),
+ emergency_memory_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
accounting_stats_.Clear();
}
-void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
- DCHECK(identity() == other->identity());
- // Destroy the linear allocation space of {other}. This is needed to
- // (a) not waste the memory and
- // (b) keep the rest of the chunk in an iterable state (filler is needed).
- other->EmptyAllocationInfo();
-
- // Move over the free list. Concatenate makes sure that the source free list
- // gets properly reset after moving over all nodes.
- intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
- other->accounting_stats_.AllocateBytes(freed_bytes);
- // We do not adjust accounting_stats_ for {this} as we treat the received
- // memory as borrowed, i.e., the originating space keeps track of its
- // capacity. Other stats, e.g. accounting_stats_.{size_,waste_} are properly
- // maintained by allocating and freeing blocks.
-}
-
-
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
// Unmerged fields:
// area_size_
// allocation_info_
+ // emergency_memory_
// end_of_unswept_pages_
// unswept_free_bytes_
// anchor_
- MoveOverFreeMemory(other);
+ // It only makes sense to merge compatible spaces.
+ DCHECK(identity() == other->identity());
+
+ // Destroy the linear allocation space of {other}. This is needed to (a) not
+ // waste the memory and (b) keep the rest of the chunk in an iterable state
+ // (filler is needed).
+ int linear_size = static_cast<int>(other->limit() - other->top());
+ other->Free(other->top(), linear_size);
+
+ // Move over the free list.
+ free_list_.Concatenate(other->free_list());
// Update and clear accounting statistics.
accounting_stats_.Merge(other->accounting_stats_);
- other->accounting_stats_.Reset();
+ other->accounting_stats_.Clear();
// Move over pages.
PageIterator it(other);
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
+ DCHECK(heap()->CommittedOldGenerationMemory() <=
+ heap()->MaxOldGenerationSize() +
+ PagedSpace::MaxEmergencyMemoryAllocated());
p->InsertAfter(anchor_.prev_page());
}
+intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
+ // New space and large object space.
+ static const int spaces_without_emergency_memory = 2;
+ static const int spaces_with_emergency_memory =
+ LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
+ return Page::kPageSize * spaces_with_emergency_memory;
+}
+
+
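
As a worked example of this bound, assuming the five-space layout of this era (NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE) and 1 MB pages; both constants are assumptions for illustration, not taken from this patch:

// spaces_with_emergency_memory = LAST_SPACE - FIRST_SPACE + 1 - 2
//                              = 5 - 2 = 3     (old, code, and map space)
// MaxEmergencyMemoryAllocated() = 3 * Page::kPageSize = 3 * 1 MB = 3 MB
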
+void PagedSpace::CreateEmergencyMemory() {
+ if (identity() == CODE_SPACE) {
+ // Make the emergency block available to the allocator.
+ CodeRange* code_range = heap()->isolate()->code_range();
+ if (code_range != NULL && code_range->valid()) {
+ code_range->ReleaseEmergencyBlock();
+ }
+ DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+ }
+ emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
+ AreaSize(), AreaSize(), executable(), this);
+}
+
+
+void PagedSpace::FreeEmergencyMemory() {
+ Page* page = static_cast<Page*>(emergency_memory_);
+ DCHECK(page->LiveBytes() == 0);
+ DCHECK(AreaSize() == page->area_size());
+ DCHECK(!free_list_.ContainsPageFreeListItems(page));
+ heap()->isolate()->memory_allocator()->Free(page);
+ emergency_memory_ = NULL;
+}
+
+
+void PagedSpace::UseEmergencyMemory() {
+ // Page::Initialize makes the chunk into a real page and adds it to the
+ // accounting for this space. Unlike PagedSpace::Expand, we don't check
+ // CanExpand first, so we can go over the limits a little here. That's OK,
+ // because we are in the process of compacting which will free up at least as
+ // much memory as it allocates.
+ Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
+ page->InsertAfter(anchor_.prev_page());
+ emergency_memory_ = NULL;
+}
+
+
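
Taken together, the three functions above give each paged space a reserve, then promote-or-release lifecycle for its emergency page. A standalone sketch (not part of this patch; unique_ptr stands in for chunks owned by MemoryAllocator, and the "promotion" for Page::Initialize plus InsertAfter):

#include <cassert>
#include <memory>
#include <vector>

struct Chunk {};

struct PagedSpaceSketch {
  std::unique_ptr<Chunk> emergency_memory;
  std::vector<std::unique_ptr<Chunk>> pages;

  bool HasEmergencyMemory() const { return emergency_memory != nullptr; }
  void CreateEmergencyMemory() { emergency_memory.reset(new Chunk()); }
  // Stands in for Page::Initialize + InsertAfter: the spare chunk becomes a
  // normal page, possibly pushing the space slightly over its size limit.
  void UseEmergencyMemory() { pages.push_back(std::move(emergency_memory)); }
  void FreeEmergencyMemory() { emergency_memory.reset(); }
};

int main() {
  PagedSpaceSketch space;
  space.CreateEmergencyMemory();   // Before evacuating a candidate page.
  bool allocation_failed = false;  // Outcome of AllocateRaw() while copying.
  if (allocation_failed) {
    space.UseEmergencyMemory();    // Promote the spare chunk to a real page.
  }
  if (space.HasEmergencyMemory()) {
    space.FreeEmergencyMemory();   // Unused: release it after compaction.
  }
  assert(!space.HasEmergencyMemory());
  return 0;
}
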
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
// any heap object.
class MemoryChunk {
public:
- // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
- // |kCompactingInProgress|: Parallel compaction is currently in progress.
- // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
- // be finalized.
- // |kCompactingAborted|: Parallel compaction has been aborted, which should
- // for now only happen in OOM scenarios.
- enum ParallelCompactingState {
- kCompactingDone,
- kCompactingInProgress,
- kCompactingFinalize,
- kCompactingAborted,
- };
-
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
base::Release_Store(¶llel_sweeping_, state);
}
- AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
- return parallel_compaction_;
- }
-
bool TryLock() { return mutex_->TryLock(); }
base::Mutex* mutex() { return mutex_; }
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::Mutex* mutex_
+ kPointerSize // base::AtomicWord parallel_sweeping_
- + kPointerSize // AtomicValue parallel_compaction_
+ 5 * kPointerSize // AtomicNumber free-list statistics
+ kPointerSize // base::AtomicWord next_chunk_
+ kPointerSize; // base::AtomicWord prev_chunk_
base::Mutex* mutex_;
base::AtomicWord parallel_sweeping_;
- AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.
AtomicNumber<intptr_t> available_in_small_free_list_;
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
+ void ReserveEmergencyBlock();
+ void ReleaseEmergencyBlock();
+
private:
// Frees the range of virtual memory, and frees the data structures used to
// manage it.
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
+ // The emergency block guarantees that we can always allocate a page for
+ // evacuation candidates when code space is compacted. It is reserved
+ // immediately after GC and released immediately before allocating a page
+ // for evacuation.
+ FreeBlock emergency_block_;
+
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
// Return size of allocatable area on a page in this space.
inline int AreaSize() { return area_size_; }
+ void CreateEmergencyMemory();
+ void FreeEmergencyMemory();
+ void UseEmergencyMemory();
+ intptr_t MaxEmergencyMemoryAllocated();
+
+ bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
// Merges {other} into the current space. Note that this modifies {other},
// e.g., removes its bump pointer area and resets statistics.
void MergeCompactionSpace(CompactionSpace* other);
- void MoveOverFreeMemory(PagedSpace* other);
-
protected:
// PagedSpaces that should be included in snapshots have different, i.e.,
// smaller, initial pages.
// end_of_unswept_pages_ page.
Page* end_of_unswept_pages_;
+ // Emergency memory is a full page's worth of memory for a given space,
+ // allocated conservatively before evacuating a page. If compaction fails
+ // due to an out-of-memory error, the emergency memory can be used to
+ // complete compaction. If not used, it is released after compaction.
+ MemoryChunk* emergency_memory_;
+
// Mutex guarding any concurrent access to the space.
base::Mutex space_mutex_;
};
-// A collection of |CompactionSpace|s used by a single compaction task.
-class CompactionSpaceCollection : public Malloced {
- public:
- explicit CompactionSpaceCollection(Heap* heap)
- : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
- code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
-
- CompactionSpace* Get(AllocationSpace space) {
- switch (space) {
- case OLD_SPACE:
- return &old_space_;
- case CODE_SPACE:
- return &code_space_;
- default:
- UNREACHABLE();
- }
- UNREACHABLE();
- return nullptr;
- }
-
- private:
- CompactionSpace old_space_;
- CompactionSpace code_space_;
-};
-
-
// -----------------------------------------------------------------------------
// Old object space (includes the old space of objects and code space)