From: hpayer Date: Mon, 3 Aug 2015 14:12:25 +0000 (-0700) Subject: Reland of land concurrent sweeping of code space. (patchset #1 id:1 of https://codere... X-Git-Tag: upstream/4.7.83~1064 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=0924d6d542854ef4d78adfd947b1ff2d1cd35778;p=platform%2Fupstream%2Fv8.git Reland of land concurrent sweeping of code space. (patchset #1 id:1 of https://codereview.chromium.org/1263343002/) Reason for revert: Bogus revert. Original issue's description: > Revert of Reland concurrent sweeping of code space. (patchset #6 id:100001 of https://codereview.chromium.org/1242333002/) > > Reason for revert: > Reverted because 507840 came back on recent Chromecrash. Should not have committed this Cl. > > Original issue's description: > > Reland concurrent sweeping of code space. > > > > BUG= > > > > Committed: https://crrev.com/8516dccf6a561020441773c93c564dd4aa6ee59e > > Cr-Commit-Position: refs/heads/master@{#29967} > > TBR=jochen@chromium.org > NOPRESUBMIT=true > NOTREECHECKS=true > NOTRY=true > BUG= > > Committed: https://crrev.com/5c6e7d00438cc82a5584e3178d7dadf36e4a34f8 > Cr-Commit-Position: refs/heads/master@{#29975} TBR=jochen@chromium.org NOPRESUBMIT=true NOTREECHECKS=true NOTRY=true BUG= Review URL: https://codereview.chromium.org/1271773002 Cr-Commit-Position: refs/heads/master@{#29976} --- diff --git a/src/frames.cc b/src/frames.cc index 5f51dec..ebe0cb1 100644 --- a/src/frames.cc +++ b/src/frames.cc @@ -1446,6 +1446,9 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer( // after the inner pointer. 
Page* page = Page::FromAddress(inner_pointer); + DCHECK_EQ(page->owner(), heap->code_space()); + heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page); + Address addr = page->skip_list()->StartFor(inner_pointer); Address top = heap->code_space()->top(); diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 3df2249..4f5a064 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -509,6 +509,7 @@ const char* Heap::GetSpaceName(int idx) { void Heap::ClearAllICsByKind(Code::Kind kind) { + // TODO(mvstanton): Do not iterate the heap. HeapObjectIterator it(code_space()); for (Object* object = it.Next(); object != NULL; object = it.Next()) { @@ -5195,6 +5196,11 @@ void Heap::Verify() { code_space_->Verify(&no_dirty_regions_visitor); lo_space_->Verify(); + + mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode(); + if (FLAG_omit_map_checks_for_leaf_maps) { + mark_compact_collector_.VerifyOmittedMapChecks(); + } } #endif diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 57549f8..d31ee46 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -226,6 +226,7 @@ static void VerifyEvacuation(Heap* heap) { void MarkCompactCollector::SetUp() { free_list_old_space_.Reset(new FreeList(heap_->old_space())); + free_list_code_space_.Reset(new FreeList(heap_->code_space())); EnsureMarkingDequeIsReserved(); EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize); } @@ -368,13 +369,6 @@ void MarkCompactCollector::CollectGarbage() { SweepSpaces(); -#ifdef VERIFY_HEAP - VerifyWeakEmbeddedObjectsInCode(); - if (FLAG_omit_map_checks_for_leaf_maps) { - VerifyOmittedMapChecks(); - } -#endif - Finish(); if (marking_parity_ == EVEN_MARKING_PARITY) { @@ -501,9 +495,27 @@ class MarkCompactCollector::SweeperTask : public v8::Task { void MarkCompactCollector::StartSweeperThreads() { DCHECK(free_list_old_space_.get()->IsEmpty()); + DCHECK(free_list_code_space_.get()->IsEmpty()); V8::GetCurrentPlatform()->CallOnBackgroundThread( new 
SweeperTask(heap(), heap()->old_space()), v8::Platform::kShortRunningTask); + V8::GetCurrentPlatform()->CallOnBackgroundThread( + new SweeperTask(heap(), heap()->code_space()), + v8::Platform::kShortRunningTask); +} + + +void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { + PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); + if (!page->SweepingCompleted()) { + SweepInParallel(page, owner); + if (!page->SweepingCompleted()) { + // We were not able to sweep that page, i.e., a concurrent + // sweeper thread currently owns this page. Wait for the sweeper + // thread to be done with this page. + page->WaitUntilSweepingCompleted(); + } + } } @@ -514,15 +526,19 @@ void MarkCompactCollector::EnsureSweepingCompleted() { // here. if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) { SweepInParallel(heap()->paged_space(OLD_SPACE), 0); + SweepInParallel(heap()->paged_space(CODE_SPACE), 0); } // Wait twice for both jobs. if (heap()->concurrent_sweeping_enabled()) { pending_sweeper_jobs_semaphore_.Wait(); + pending_sweeper_jobs_semaphore_.Wait(); } ParallelSweepSpacesComplete(); sweeping_in_progress_ = false; RefillFreeList(heap()->paged_space(OLD_SPACE)); + RefillFreeList(heap()->paged_space(CODE_SPACE)); heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes(); + heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes(); #ifdef VERIFY_HEAP if (FLAG_verify_heap && !evacuation()) { @@ -547,6 +563,8 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) { if (space == heap()->old_space()) { free_list = free_list_old_space_.get(); + } else if (space == heap()->code_space()) { + free_list = free_list_code_space_.get(); } else { // Any PagedSpace might invoke RefillFreeLists, so we need to make sure // to only refill them for the old space. 
@@ -3479,14 +3497,17 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p, DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); int offsets[16]; + // If we use the skip list for code space pages, we have to lock the skip + // list because it could be accessed concurrently by the runtime or the + // deoptimizer. SkipList* skip_list = p->skip_list(); - int curr_region = -1; if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { skip_list->Clear(); } intptr_t freed_bytes = 0; intptr_t max_freed_bytes = 0; + int curr_region = -1; for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { Address cell_base = it.CurrentCellBase(); @@ -4247,10 +4268,19 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { return 0; } page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS); - FreeList* free_list = free_list_old_space_.get(); + FreeList* free_list; FreeList private_free_list(space); - max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + if (space->identity() == CODE_SPACE) { + free_list = free_list_code_space_.get(); + max_freed = + Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + } else { + free_list = free_list_old_space_.get(); + max_freed = + Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); + } free_list->Concatenate(&private_free_list); page->mutex()->Unlock(); } @@ -4308,8 +4338,19 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p)); } - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + if (space->identity() == CODE_SPACE) { + if (FLAG_zap_code_space) { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); + } else { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } + } else { + DCHECK(space->identity() == OLD_SPACE); + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } pages_swept++; parallel_sweeping_active = true; } else { @@ -4326,13 +4367,17 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { if (FLAG_gc_verbose) { PrintF("Sweeping 0x%" V8PRIxPTR ".\n", 
reinterpret_cast<intptr_t>(p)); } - if (space->identity() == CODE_SPACE && FLAG_zap_code_space) { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); - } else if (space->identity() == CODE_SPACE) { - Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + if (space->identity() == CODE_SPACE) { + if (FLAG_zap_code_space) { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, NULL); + } else { + Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); + } } else { + DCHECK(space->identity() == OLD_SPACE || + space->identity() == MAP_SPACE); Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, NULL); } @@ -4372,21 +4417,24 @@ void MarkCompactCollector::SweepSpaces() { // the other spaces rely on possibly non-live maps to get the sizes for // non-live objects. { - GCTracer::Scope sweep_scope(heap()->tracer(), - GCTracer::Scope::MC_SWEEP_OLDSPACE); - { SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); } + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_OLDSPACE); + SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); + } + { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_CODE); + SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING); + } + sweeping_in_progress_ = true; if (heap()->concurrent_sweeping_enabled()) { StartSweeperThreads(); } } - RemoveDeadInvalidatedCode(); - { - GCTracer::Scope sweep_scope(heap()->tracer(), - GCTracer::Scope::MC_SWEEP_CODE); - SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING); - } + RemoveDeadInvalidatedCode(); EvacuateNewSpaceAndCandidates(); @@ -4439,6 +4487,7 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) { void MarkCompactCollector::ParallelSweepSpacesComplete() { ParallelSweepSpaceComplete(heap()->old_space()); + ParallelSweepSpaceComplete(heap()->code_space()); } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index b4a4fe7..8f68917 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -699,6 +699,8 @@ class MarkCompactCollector { void EnsureSweepingCompleted(); + void SweepOrWaitUntilSweepingCompleted(Page* page); + // If sweeper threads are not 
active this method will return true. If // this is a latency issue we should be smarter here. Otherwise, it will // return true if the sweeper threads are done processing the pages. @@ -979,6 +981,7 @@ class MarkCompactCollector { List<Code*> invalidated_code_; base::SmartPointer<FreeList> free_list_old_space_; + base::SmartPointer<FreeList> free_list_code_space_; friend class Heap; }; diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index f198210..3a5a4d6 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -75,6 +75,8 @@ bool HeapObjectIterator::AdvanceToNextPage() { } cur_page = cur_page->next_page(); if (cur_page == space_->anchor()) return false; + cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted( + cur_page); cur_addr_ = cur_page->area_start(); cur_end_ = cur_page->area_end(); DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted()); diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc index a5191dc..a196844 100644 --- a/src/heap/store-buffer.cc +++ b/src/heap/store-buffer.cc @@ -468,16 +468,8 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { } } } else { - if (!page->SweepingCompleted()) { - heap_->mark_compact_collector()->SweepInParallel(page, owner); - if (!page->SweepingCompleted()) { - // We were not able to sweep that page, i.e., a concurrent - // sweeper thread currently owns this page. Wait for the sweeper - // thread to be done with this page. - page->WaitUntilSweepingCompleted(); - } - } - CHECK(page->owner() == heap_->old_space()); + heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted( + page); HeapObjectIterator iterator(page, NULL); for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; heap_object = iterator.Next()) {