From 806b81f11e3bfaef0d4330c7669e6934074be9cb Mon Sep 17 00:00:00 2001 From: hpayer Date: Mon, 20 Jul 2015 03:36:35 -0700 Subject: [PATCH] Reland concurrent sweeping of code space. BUG=chromium:506778,chromium:506957,chromium:507211 LOG=n Review URL: https://codereview.chromium.org/1225733002 Cr-Commit-Position: refs/heads/master@{#29748} --- src/frames.cc | 4 ++ src/heap/heap.cc | 11 +++++ src/heap/mark-compact.cc | 104 +++++++++++++++++++++++++++++++++++------------ src/heap/mark-compact.h | 3 ++ src/heap/store-buffer.cc | 12 +----- 5 files changed, 97 insertions(+), 37 deletions(-) diff --git a/src/frames.cc b/src/frames.cc index 66bcf3d..c978f72 100644 --- a/src/frames.cc +++ b/src/frames.cc @@ -1446,6 +1446,10 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer( // after the inner pointer. Page* page = Page::FromAddress(inner_pointer); + DCHECK(page->owner() == heap->code_space()); + heap->mark_compact_collector()->EnsureSweepingCompleted( + page, reinterpret_cast<PagedSpace*>(page->owner())); + Address addr = page->skip_list()->StartFor(inner_pointer); Address top = heap->code_space()->top(); diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 3340a0f..04dd855 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -509,6 +509,12 @@ const char* Heap::GetSpaceName(int idx) { void Heap::ClearAllICsByKind(Code::Kind kind) { + // If concurrent sweeping is in progress, we have to wait for the sweeper + // threads before we iterate the heap. + if (mark_compact_collector()->sweeping_in_progress()) { + mark_compact_collector()->EnsureSweepingCompleted(); + } + // TODO(mvstanton): Do not iterate the heap. 
HeapObjectIterator it(code_space()); for (Object* object = it.Next(); object != NULL; object = it.Next()) { @@ -5157,6 +5163,11 @@ void Heap::Verify() { code_space_->Verify(&no_dirty_regions_visitor); lo_space_->Verify(); + + mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode(); + if (FLAG_omit_map_checks_for_leaf_maps) { + mark_compact_collector_.VerifyOmittedMapChecks(); + } } #endif diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 38008c3..4435ff4 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -226,6 +226,7 @@ static void VerifyEvacuation(Heap* heap) { void MarkCompactCollector::SetUp() { free_list_old_space_.Reset(new FreeList(heap_->old_space())); + free_list_code_space_.Reset(new FreeList(heap_->code_space())); EnsureMarkingDequeIsReserved(); EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); } @@ -366,13 +367,6 @@ void MarkCompactCollector::CollectGarbage() { SweepSpaces(); -#ifdef VERIFY_HEAP - VerifyWeakEmbeddedObjectsInCode(); - if (FLAG_omit_map_checks_for_leaf_maps) { - VerifyOmittedMapChecks(); - } -#endif - Finish(); if (marking_parity_ == EVEN_MARKING_PARITY) { @@ -499,9 +493,13 @@ class MarkCompactCollector::SweeperTask : public v8::Task { void MarkCompactCollector::StartSweeperThreads() { DCHECK(free_list_old_space_.get()->IsEmpty()); + DCHECK(free_list_code_space_.get()->IsEmpty()); V8::GetCurrentPlatform()->CallOnBackgroundThread( new SweeperTask(heap(), heap()->old_space()), v8::Platform::kShortRunningTask); + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new SweeperTask(heap(), heap()->code_space()), + v8::Platform::kShortRunningTask); } @@ -512,15 +510,19 @@ void MarkCompactCollector::EnsureSweepingCompleted() { // here. if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) { SweepInParallel(heap()->paged_space(OLD_SPACE), 0); + SweepInParallel(heap()->paged_space(CODE_SPACE), 0); } // Wait twice for both jobs. 
if (heap()->concurrent_sweeping_enabled()) { pending_sweeper_jobs_semaphore_.Wait(); + pending_sweeper_jobs_semaphore_.Wait(); } ParallelSweepSpacesComplete(); sweeping_in_progress_ = false; RefillFreeList(heap()->paged_space(OLD_SPACE)); + RefillFreeList(heap()->paged_space(CODE_SPACE)); heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes(); + heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes(); #ifdef VERIFY_HEAP if (FLAG_verify_heap && !evacuation()) { @@ -530,6 +532,21 @@ void MarkCompactCollector::EnsureSweepingCompleted() { } +void MarkCompactCollector::EnsureSweepingCompleted(Page* page, + PagedSpace* space) { + if (!page->SweepingCompleted()) { + SweepInParallel(page, space); + if (!page->SweepingCompleted()) { + // We were not able to sweep that page, i.e., a concurrent + // sweeper thread currently owns this page. + // TODO(hpayer): This may introduce a huge pause here. We + // just care about finish sweeping of the scan on scavenge page. + EnsureSweepingCompleted(); + } + } +} + + bool MarkCompactCollector::IsSweepingCompleted() { if (!pending_sweeper_jobs_semaphore_.WaitFor( base::TimeDelta::FromSeconds(0))) { @@ -545,6 +562,8 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) { if (space == heap()->old_space()) { free_list = free_list_old_space_.get(); + } else if (space == heap()->code_space()) { + free_list = free_list_code_space_.get(); } else { // Any PagedSpace might invoke RefillFreeLists, so we need to make sure // to only refill them for the old space. @@ -3493,14 +3512,17 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p, DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); int offsets[16]; + // If we use the skip list for code space pages, we have to lock the skip + // list because it could be accessed concurrently by the runtime or the + // deoptimizer. 
SkipList* skip_list = p->skip_list(); - int curr_region = -1; if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { skip_list->Clear(); } intptr_t freed_bytes = 0; intptr_t max_freed_bytes = 0; + int curr_region = -1; for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { Address cell_base = it.CurrentCellBase(); @@ -4164,10 +4186,19 @@ int MarkCompactCollector::SweepInParallel(PagedSpace* space, int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { int max_freed = 0; if (page->TryParallelSweeping()) { - FreeList* free_list = free_list_old_space_.get(); + FreeList* free_list; FreeList private_free_list(space); - max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + if (space->identity() == CODE_SPACE) { + free_list = free_list_code_space_.get(); + max_freed = + Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + } else { + free_list = free_list_old_space_.get(); + max_freed = + Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + } free_list->Concatenate(&private_free_list); } return max_freed; @@ -4224,8 +4255,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); } - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + if (space->identity() == CODE_SPACE) { + if (FLAG_zap_code_space) { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); + } else { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } + } else { + DCHECK(space->identity() == OLD_SPACE); + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } pages_swept++; parallel_sweeping_active = true; } else { @@ -4242,13 +4284,17 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { if (FLAG_gc_verbose) { PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); } - if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); - } else if (space->identity() == CODE_SPACE) { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + if (space->identity() == CODE_SPACE) { + if (FLAG_zap_code_space) { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); + } else 
{ + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } } else { + DCHECK(space->identity() == OLD_SPACE || + space->identity() == MAP_SPACE); Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); } @@ -4288,19 +4334,22 @@ void MarkCompactCollector::SweepSpaces() { // the other spaces rely on possibly non-live maps to get the sizes for // non-live objects. { - GCTracer::Scope sweep_scope(heap()->tracer(), - GCTracer::Scope::MC_SWEEP_OLDSPACE); - { SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); } + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_OLDSPACE); + SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); + } + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_CODE); + SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING); + } + sweeping_in_progress_ = true; if (heap()->concurrent_sweeping_enabled()) { StartSweeperThreads(); } } - { - GCTracer::Scope sweep_scope(heap()->tracer(), - GCTracer::Scope::MC_SWEEP_CODE); - SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING); - } EvacuateNewSpaceAndCandidates(); @@ -4353,6 +4402,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { void MarkCompactCollector::ParallelSweepSpacesComplete() { ParallelSweepSpaceComplete(heap()->old_space()); + ParallelSweepSpaceComplete(heap()->code_space()); } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index e212c95..b2a41ce 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -691,6 +691,8 @@ class MarkCompactCollector { void EnsureSweepingCompleted(); + void EnsureSweepingCompleted(Page* page, PagedSpace* space); + // If sweeper threads are not active this method will return true. If // this is a latency issue we should be smarter here. Otherwise, it will // return true if the sweeper threads are done processing the pages. 
@@ -967,6 +969,7 @@ class MarkCompactCollector { List<Page*> evacuation_candidates_; base::SmartPointer<FreeList> free_list_old_space_; + base::SmartPointer<FreeList> free_list_code_space_; friend class Heap; }; diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc index 03f587f..38d8462 100644 --- a/src/heap/store-buffer.cc +++ b/src/heap/store-buffer.cc @@ -468,17 +468,9 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { } } } else { - if (!page->SweepingCompleted()) { - heap_->mark_compact_collector()->SweepInParallel(page, owner); - if (!page->SweepingCompleted()) { - // We were not able to sweep that page, i.e., a concurrent - // sweeper thread currently owns this page. - // TODO(hpayer): This may introduce a huge pause here. We - // just care about finish sweeping of the scan on scavenge page. - heap_->mark_compact_collector()->EnsureSweepingCompleted(); - } - } CHECK(page->owner() == heap_->old_space()); + heap_->mark_compact_collector()->EnsureSweepingCompleted(page, + owner); HeapObjectIterator iterator(page, NULL); for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; heap_object = iterator.Next()) { -- 2.7.4