Revert of Reland concurrent sweeping of code space. (patchset #6 id:100001 of https...
author    hpayer <hpayer@chromium.org>
          Mon, 3 Aug 2015 13:06:42 +0000 (06:06 -0700)
committer Commit bot <commit-bot@chromium.org>
          Mon, 3 Aug 2015 13:06:55 +0000 (13:06 +0000)
Reason for revert:
Reverted because bug 507840 came back in recent Chrome crash reports. This CL should not have been committed.

Original issue's description:
> Reland concurrent sweeping of code space.
>
> BUG=
>
> Committed: https://crrev.com/8516dccf6a561020441773c93c564dd4aa6ee59e
> Cr-Commit-Position: refs/heads/master@{#29967}

TBR=jochen@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1263343002

Cr-Commit-Position: refs/heads/master@{#29975}

src/frames.cc
src/heap/heap.cc
src/heap/mark-compact.cc
src/heap/mark-compact.h
src/heap/spaces.cc
src/heap/store-buffer.cc

diff --git a/src/frames.cc b/src/frames.cc
index ebe0cb121c34d0eec9a2f11272bdb9ddef53bf3d..5f51dec2882cab9c473032d47c15bfce5d711105 100644
@@ -1446,9 +1446,6 @@ Code* InnerPointerToCodeCache::GcSafeFindCodeForInnerPointer(
   // after the inner pointer.
   Page* page = Page::FromAddress(inner_pointer);
 
-  DCHECK_EQ(page->owner(), heap->code_space());
-  heap->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(page);
-
   Address addr = page->skip_list()->StartFor(inner_pointer);
 
   Address top = heap->code_space()->top();
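
With code-space pages once again swept only on the main thread, GcSafeFindCodeForInnerPointer can consult the page's skip list directly: the list has already been rebuilt by the time the mutator runs, so the DCHECK and sweep-or-wait guard removed above are no longer needed. As a rough illustration of why a consistent skip list matters for this lookup, the toy model below maps each fixed-size region of a page to a safe scan-start offset. All names and types in it are invented stand-ins, not V8's SkipList API, and it assumes contiguous objects with no free gaps.

    // Toy stand-in for the skip-list idea: for each region of a "page", remember
    // an object start at or before the region, so an inner-pointer lookup can
    // begin a linear scan near the pointer instead of at the page start.
    #include <cstddef>
    #include <cstdio>
    #include <utility>
    #include <vector>

    constexpr size_t kPageSize = 1 << 12;   // 4 KB toy page
    constexpr size_t kRegionSize = 1 << 8;  // 256-byte regions
    constexpr size_t kRegions = kPageSize / kRegionSize;

    struct ToySkipList {
      size_t scan_start[kRegions] = {0};

      // Rebuild from objects given as (start offset, size), sorted and contiguous.
      void Rebuild(const std::vector<std::pair<size_t, size_t>>& objects) {
        size_t region = 0;
        for (const auto& [start, size] : objects) {
          // Every region whose first byte lies inside this object scans from it.
          while (region < kRegions && region * kRegionSize < start + size) {
            scan_start[region++] = start;
          }
        }
      }

      size_t StartFor(size_t inner_offset) const {
        return scan_start[inner_offset / kRegionSize];
      }
    };

    int main() {
      // Three contiguous toy objects at offsets 0, 300 and 900.
      ToySkipList list;
      list.Rebuild({{0, 300}, {300, 600}, {900, kPageSize - 900}});
      // An inner pointer at offset 700 lands in the object starting at 300.
      std::printf("scan starts at offset %zu\n", list.StartFor(700));
      return 0;
    }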
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 4f5a064cca37a0f51ce644a815da5786008d664a..3df224995eff4807d2da1208b8c270363c011640 100644
@@ -509,7 +509,6 @@ const char* Heap::GetSpaceName(int idx) {
 
 
 void Heap::ClearAllICsByKind(Code::Kind kind) {
-  // TODO(mvstanton): Do not iterate the heap.
   HeapObjectIterator it(code_space());
 
   for (Object* object = it.Next(); object != NULL; object = it.Next()) {
@@ -5196,11 +5195,6 @@ void Heap::Verify() {
   code_space_->Verify(&no_dirty_regions_visitor);
 
   lo_space_->Verify();
-
-  mark_compact_collector_.VerifyWeakEmbeddedObjectsInCode();
-  if (FLAG_omit_map_checks_for_leaf_maps) {
-    mark_compact_collector_.VerifyOmittedMapChecks();
-  }
 }
 #endif
 
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index d31ee46b4ea4cb20802a61205eb1051c3f87183e..57549f8036216d5bece3a06f1ff759bd3f859e18 100644
@@ -226,7 +226,6 @@ static void VerifyEvacuation(Heap* heap) {
 
 void MarkCompactCollector::SetUp() {
   free_list_old_space_.Reset(new FreeList(heap_->old_space()));
-  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
   EnsureMarkingDequeIsReserved();
   EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
 }
@@ -369,6 +368,13 @@ void MarkCompactCollector::CollectGarbage() {
 
   SweepSpaces();
 
+#ifdef VERIFY_HEAP
+  VerifyWeakEmbeddedObjectsInCode();
+  if (FLAG_omit_map_checks_for_leaf_maps) {
+    VerifyOmittedMapChecks();
+  }
+#endif
+
   Finish();
 
   if (marking_parity_ == EVEN_MARKING_PARITY) {
@@ -495,27 +501,9 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
 
 void MarkCompactCollector::StartSweeperThreads() {
   DCHECK(free_list_old_space_.get()->IsEmpty());
-  DCHECK(free_list_code_space_.get()->IsEmpty());
   V8::GetCurrentPlatform()->CallOnBackgroundThread(
       new SweeperTask(heap(), heap()->old_space()),
       v8::Platform::kShortRunningTask);
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new SweeperTask(heap(), heap()->code_space()),
-      v8::Platform::kShortRunningTask);
-}
-
-
-void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
-  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-  if (!page->SweepingCompleted()) {
-    SweepInParallel(page, owner);
-    if (!page->SweepingCompleted()) {
-      // We were not able to sweep that page, i.e., a concurrent
-      // sweeper thread currently owns this page. Wait for the sweeper
-      // thread to be done with this page.
-      page->WaitUntilSweepingCompleted();
-    }
-  }
 }
 
 
@@ -526,19 +514,15 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   // here.
   if (!heap()->concurrent_sweeping_enabled() || !IsSweepingCompleted()) {
     SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
-    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
   }
   // Wait twice for both jobs.
   if (heap()->concurrent_sweeping_enabled()) {
     pending_sweeper_jobs_semaphore_.Wait();
-    pending_sweeper_jobs_semaphore_.Wait();
   }
   ParallelSweepSpacesComplete();
   sweeping_in_progress_ = false;
   RefillFreeList(heap()->paged_space(OLD_SPACE));
-  RefillFreeList(heap()->paged_space(CODE_SPACE));
   heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
-  heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
 
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
@@ -563,8 +547,6 @@ void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
 
   if (space == heap()->old_space()) {
     free_list = free_list_old_space_.get();
-  } else if (space == heap()->code_space()) {
-    free_list = free_list_code_space_.get();
   } else {
     // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
     // to only refill them for the old space.
@@ -3497,17 +3479,14 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
   int offsets[16];
 
-  // If we use the skip list for code space pages, we have to lock the skip
-  // list because it could be accessed concurrently by the runtime or the
-  // deoptimizer.
   SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
   if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
     skip_list->Clear();
   }
 
   intptr_t freed_bytes = 0;
   intptr_t max_freed_bytes = 0;
-  int curr_region = -1;
 
   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
@@ -4268,19 +4247,10 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
       return 0;
     }
     page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
-    FreeList* free_list;
+    FreeList* free_list = free_list_old_space_.get();
     FreeList private_free_list(space);
-    if (space->identity() == CODE_SPACE) {
-      free_list = free_list_code_space_.get();
-      max_freed =
-          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
-    } else {
-      free_list = free_list_old_space_.get();
-      max_freed =
-          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
-                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
-    }
+    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     free_list->Concatenate(&private_free_list);
     page->mutex()->Unlock();
   }
@@ -4338,19 +4308,8 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
             PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
                    reinterpret_cast<intptr_t>(p));
           }
-          if (space->identity() == CODE_SPACE) {
-            if (FLAG_zap_code_space) {
-              Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                    ZAP_FREE_SPACE>(space, NULL, p, NULL);
-            } else {
-              Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                    IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-            }
-          } else {
-            DCHECK(space->identity() == OLD_SPACE);
-            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
-                  IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-          }
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
           pages_swept++;
           parallel_sweeping_active = true;
         } else {
@@ -4367,17 +4326,13 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
         }
-        if (space->identity() == CODE_SPACE) {
-          if (FLAG_zap_code_space) {
-            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  ZAP_FREE_SPACE>(space, NULL, p, NULL);
-          } else {
-            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
-                  IGNORE_FREE_SPACE>(space, NULL, p, NULL);
-          }
+        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                ZAP_FREE_SPACE>(space, NULL, p, NULL);
+        } else if (space->identity() == CODE_SPACE) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         } else {
-          DCHECK(space->identity() == OLD_SPACE ||
-                 space->identity() == MAP_SPACE);
           Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
         }
@@ -4417,25 +4372,22 @@ void MarkCompactCollector::SweepSpaces() {
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
   {
-    {
-      GCTracer::Scope sweep_scope(heap()->tracer(),
-                                  GCTracer::Scope::MC_SWEEP_OLDSPACE);
-      SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING);
-    }
-    {
-      GCTracer::Scope sweep_scope(heap()->tracer(),
-                                  GCTracer::Scope::MC_SWEEP_CODE);
-      SweepSpace(heap()->code_space(), CONCURRENT_SWEEPING);
-    }
-
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_OLDSPACE);
+    { SweepSpace(heap()->old_space(), CONCURRENT_SWEEPING); }
     sweeping_in_progress_ = true;
     if (heap()->concurrent_sweeping_enabled()) {
       StartSweeperThreads();
     }
   }
-
   RemoveDeadInvalidatedCode();
 
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_CODE);
+    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
+  }
+
   EvacuateNewSpaceAndCandidates();
 
   heap()->FreeDeadArrayBuffers(false);
@@ -4487,7 +4439,6 @@ void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
 
 void MarkCompactCollector::ParallelSweepSpacesComplete() {
   ParallelSweepSpaceComplete(heap()->old_space());
-  ParallelSweepSpaceComplete(heap()->code_space());
 }
 
 
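Taken together, the mark-compact.cc changes restore the pre-reland shape of sweeping: a single background sweeper task handles the old space (one semaphore signal, one wait), while the code space is swept sequentially on the main thread after RemoveDeadInvalidatedCode. The self-contained sketch below models that split with standard C++ threads; the names (BackgroundSweepOldSpace, SweepCodeSpaceOnMainThread, and the semaphore built from a mutex and condition variable) are illustrative stand-ins, not V8 APIs.

    // Minimal model of the post-revert flow: one concurrent sweep job for the
    // old space, signalled through a semaphore-like latch, plus sequential
    // sweeping of the code space on the main thread. All names are invented.
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    class Semaphore {  // stand-in for pending_sweeper_jobs_semaphore_
     public:
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_ = 0;
    };

    void BackgroundSweepOldSpace(Semaphore* done) {
      std::puts("sweeping old space concurrently");
      done->Signal();  // corresponds to the sweeper task signalling completion
    }

    void SweepCodeSpaceOnMainThread() {
      std::puts("sweeping code space sequentially on the main thread");
    }

    int main() {
      Semaphore pending_sweeper_jobs;
      // StartSweeperThreads(): only the old space gets a background task now.
      std::thread sweeper(BackgroundSweepOldSpace, &pending_sweeper_jobs);

      // SweepSpaces(): code space is swept on the main thread while the
      // concurrent old-space job runs.
      SweepCodeSpaceOnMainThread();

      // EnsureSweepingCompleted(): a single Wait() matches the single job.
      pending_sweeper_jobs.Wait();
      sweeper.join();
      return 0;
    }
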
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 8f68917c319bdd3382fcf0b7a3d6adfff3e928c6..b4a4fe7fda2c423c6450ee513d3d127472c0b63e 100644
@@ -699,8 +699,6 @@ class MarkCompactCollector {
 
   void EnsureSweepingCompleted();
 
-  void SweepOrWaitUntilSweepingCompleted(Page* page);
-
   // If sweeper threads are not active this method will return true. If
   // this is a latency issue we should be smarter here. Otherwise, it will
   // return true if the sweeper threads are done processing the pages.
@@ -981,7 +979,6 @@ class MarkCompactCollector {
   List<Code*> invalidated_code_;
 
   base::SmartPointer<FreeList> free_list_old_space_;
-  base::SmartPointer<FreeList> free_list_code_space_;
 
   friend class Heap;
 };
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 3a5a4d6b58495e0c8be65e44de3116ace0232228..f19821070b7efcc2dbe9aef861b5154d86525ef5 100644
@@ -75,8 +75,6 @@ bool HeapObjectIterator::AdvanceToNextPage() {
   }
   cur_page = cur_page->next_page();
   if (cur_page == space_->anchor()) return false;
-  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
-      cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
   DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
index a1968442541d166f33281e62644d1272d806200c..a5191dc748e34e944b015b52c67207c6ba537553 100644
@@ -468,8 +468,16 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
               }
             }
           } else {
-            heap_->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
-                page);
+            if (!page->SweepingCompleted()) {
+              heap_->mark_compact_collector()->SweepInParallel(page, owner);
+              if (!page->SweepingCompleted()) {
+                // We were not able to sweep that page, i.e., a concurrent
+                // sweeper thread currently owns this page. Wait for the sweeper
+                // thread to be done with this page.
+                page->WaitUntilSweepingCompleted();
+              }
+            }
+            CHECK(page->owner() == heap_->old_space());
             HeapObjectIterator iterator(page, NULL);
             for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                  heap_object = iterator.Next()) {
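
The store-buffer change re-inlines the sweep-or-wait handshake that the deleted MarkCompactCollector::SweepOrWaitUntilSweepingCompleted helper used to provide: try to sweep the page in parallel yourself, and if a concurrent sweeper already owns it, block until that sweeper finishes. A self-contained model of the handshake is sketched below; TrySweep, the flags, and the mutex/condition-variable pair are stand-ins chosen for illustration, not V8's actual Page interface.

    // Minimal model of the sweep-or-wait handshake kept in the store buffer.
    // Invented names; a std::mutex/std::condition_variable pair stands in for
    // the page mutex and the SweepingCompleted()/WaitUntilSweepingCompleted()
    // protocol used above.
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    struct ToyPage {
      std::mutex mutex;
      std::condition_variable swept_cv;
      bool sweeping_completed = false;
      bool claimed_by_sweeper = false;

      // Returns true if the page is swept by the time we return control to the
      // caller without waiting, false if a concurrent sweeper still owns it.
      bool TrySweep() {
        {
          std::lock_guard<std::mutex> lock(mutex);
          if (sweeping_completed) return true;
          if (claimed_by_sweeper) return false;  // another thread owns the page
          claimed_by_sweeper = true;
        }
        std::puts("sweeping page inline");
        MarkSwept();
        return true;
      }

      void MarkSwept() {
        std::lock_guard<std::mutex> lock(mutex);
        sweeping_completed = true;
        swept_cv.notify_all();
      }

      void WaitUntilSweepingCompleted() {
        std::unique_lock<std::mutex> lock(mutex);
        swept_cv.wait(lock, [this] { return sweeping_completed; });
      }
    };

    // The pattern from IteratePointersToNewSpace: sweep the page ourselves if
    // we can, otherwise wait for the concurrent sweeper to release it.
    void SweepOrWait(ToyPage* page) {
      if (!page->TrySweep()) {
        page->WaitUntilSweepingCompleted();
      }
    }

    int main() {
      ToyPage page;
      page.claimed_by_sweeper = true;  // pretend a background sweeper owns it
      std::thread sweeper([&page] { page.MarkSwept(); });
      SweepOrWait(&page);  // falls through to WaitUntilSweepingCompleted()
      sweeper.join();
      std::puts("page is swept; safe to iterate its objects");
      return 0;
    }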