Revert of "[heap] Add more tasks for parallel compaction" (patchset #4 id:100001...
author     mlippautz <mlippautz@chromium.org>
           Fri, 25 Sep 2015 15:40:27 +0000 (08:40 -0700)
committer  Commit bot <commit-bot@chromium.org>
           Fri, 25 Sep 2015 15:41:22 +0000 (15:41 +0000)
Reason for revert:
failing again: https://chromegw.corp.google.com/i/client.v8/builders/V8%20Mac/builds/4505/steps/Mozilla%20%28flakes%29/logs/regress-416628

Original issue's description:
> Reland of "[heap] Add more tasks for parallel compaction"
>
> - We now compute the number of parallel compaction tasks, depending on the
>   evacuation candidate list, the number of cores, and some hard limit.
> - Free memory is moved over to compaction tasks (up to some limit)
> - Moving over memory is done by dividing the free list of a given space up among
>   other free lists. Since this is potentially slow we limit the maximum amount
>   of moved memory.
>
> This reverts commit bfccd5187ceb21c99feea4538e08ca7aef48b65b.
>
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/7e283d746a194ceaaca114e2ba17504653d6a109
> Cr-Commit-Position: refs/heads/master@{#30945}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1371653002

Cr-Commit-Position: refs/heads/master@{#30947}

src/heap/mark-compact.cc
src/heap/mark-compact.h
src/heap/spaces.cc
src/heap/spaces.h
test/cctest/test-spaces.cc
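
For reference while reading the mark-compact.cc hunks below: the reverted change picked the task count as the minimum of a hard cap, a value derived from the evacuation candidate list, and the available cores. A standalone sketch of that heuristic, mirroring the removed NumberOfParallelCompactionTasks() (constants are the ones from the removed code; std::thread::hardware_concurrency() stands in for base::SysInfo::NumberOfProcessors()):

#include <algorithm>
#include <thread>

int NumberOfParallelCompactionTasks(int evacuation_candidates,
                                    bool parallel_compaction) {
  if (!parallel_compaction) return 1;
  // Cap by a hard limit, by ~4 candidate pages per task, and by #cores - 1.
  const int kPagesPerCompactionTask = 4;
  const int kMaxCompactionTasks = 8;
  const int cores =
      static_cast<int>(std::thread::hardware_concurrency());  // may be 0
  return std::min(
      kMaxCompactionTasks,
      std::min(1 + evacuation_candidates / kPagesPerCompactionTask,
               std::max(1, cores - 1)));
}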

diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index d090de0..87e1b34 100644
@@ -6,7 +6,6 @@
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
-#include "src/base/sys-info.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
@@ -573,6 +572,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
     VerifyEvacuation(heap_);
@@ -593,6 +593,7 @@ bool MarkCompactCollector::IsSweepingCompleted() {
 
 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
   FreeList* free_list;
+
   if (space == heap()->old_space()) {
     free_list = free_list_old_space_.get();
   } else if (space == heap()->code_space()) {
@@ -3369,57 +3370,52 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
 }
 
 
-int MarkCompactCollector::NumberOfParallelCompactionTasks() {
-  if (!FLAG_parallel_compaction) return 1;
-  // We cap the number of parallel compaction tasks by
-  // - (#cores - 1)
-  // - a value depending on the list of evacuation candidates
-  // - a hard limit
-  const int kPagesPerCompactionTask = 4;
-  const int kMaxCompactionTasks = 8;
-  return Min(kMaxCompactionTasks,
-             Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
-                 Max(1, base::SysInfo::NumberOfProcessors() - 1)));
-}
-
-
 void MarkCompactCollector::EvacuatePagesInParallel() {
   if (evacuation_candidates_.length() == 0) return;
 
-  const int num_tasks = NumberOfParallelCompactionTasks();
+  int num_tasks = 1;
+  if (FLAG_parallel_compaction) {
+    num_tasks = NumberOfParallelCompactionTasks();
+  }
 
   // Set up compaction spaces.
-  CompactionSpaceCollection** spaces_for_tasks =
+  CompactionSpaceCollection** compaction_spaces_for_tasks =
       new CompactionSpaceCollection*[num_tasks];
   for (int i = 0; i < num_tasks; i++) {
-    spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
   }
-  heap()->old_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
-  heap()->code_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
+
+  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
+      heap()->old_space());
+  compaction_spaces_for_tasks[0]
+      ->Get(CODE_SPACE)
+      ->MoveOverFreeMemory(heap()->code_space());
 
   compaction_in_progress_ = true;
   // Kick off parallel tasks.
   for (int i = 1; i < num_tasks; i++) {
     concurrent_compaction_tasks_active_++;
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompactionTask(heap(), spaces_for_tasks[i]),
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
         v8::Platform::kShortRunningTask);
   }
 
-  // Perform compaction on the main thread.
-  EvacuatePages(spaces_for_tasks[0], &migration_slots_buffer_);
+  // Contribute in main thread. Counter and signal are in principle not needed.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
+  pending_compaction_tasks_semaphore_.Signal();
 
   WaitUntilCompactionCompleted();
 
   // Merge back memory (compacted and unused) from compaction spaces.
   for (int i = 0; i < num_tasks; i++) {
     heap()->old_space()->MergeCompactionSpace(
-        spaces_for_tasks[i]->Get(OLD_SPACE));
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
     heap()->code_space()->MergeCompactionSpace(
-        spaces_for_tasks[i]->Get(CODE_SPACE));
-    delete spaces_for_tasks[i];
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
   }
-  delete[] spaces_for_tasks;
+  delete[] compaction_spaces_for_tasks;
 
   // Finalize sequentially.
   const int num_pages = evacuation_candidates_.length();
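
Note the join protocol this hunk restores: the main thread registers itself as one of the tasks, runs EvacuatePages on compaction space 0, and signals the same semaphore the background tasks signal, so waiting can treat every task uniformly. A minimal standalone sketch of that pattern, assuming WaitUntilCompactionCompleted waits once per registered task (C++20 std::counting_semaphore stands in for v8::base::Semaphore; the page evacuation itself is elided):

#include <semaphore>
#include <thread>
#include <vector>

std::counting_semaphore<64> pending_tasks(0);  // pending_compaction_tasks_semaphore_
int tasks_active = 0;  // concurrent_compaction_tasks_active_, main thread only

void RunCompactionTask(int task_id) {
  // ... evacuate candidate pages into this task's compaction spaces ...
  pending_tasks.release();  // Every task signals on completion.
}

void EvacuatePagesInParallelSketch(int num_tasks) {
  std::vector<std::thread> background;
  for (int i = 1; i < num_tasks; i++) {  // Tasks 1..num_tasks-1 in background.
    tasks_active++;
    background.emplace_back(RunCompactionTask, i);
  }
  tasks_active++;        // Main thread contributes as task 0...
  RunCompactionTask(0);  // ...and signals like any other task.
  while (tasks_active-- > 0) pending_tasks.acquire();  // The assumed join.
  for (auto& t : background) t.join();
}
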
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index 724650c..6558eb2 100644
@@ -709,8 +709,11 @@ class MarkCompactCollector {
 
   void EvacuatePagesInParallel();
 
-  // The number of parallel compaction tasks, including the main thread.
-  int NumberOfParallelCompactionTasks();
+  int NumberOfParallelCompactionTasks() {
+    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
+    // of compaction tasks.
+    return 1;
+  }
 
   void WaitUntilCompactionCompleted();
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 69a37a5..cd8a729 100644
@@ -1014,7 +1014,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
+  other->accounting_stats_.Reset();
 
   // Move over pages.
   PageIterator it(other);
@@ -2213,44 +2213,6 @@ intptr_t FreeList::Concatenate(FreeList* free_list) {
 }
 
 
-FreeSpace* PagedSpace::TryRemoveMemory() {
-  FreeSpace* space = nullptr;
-  int node_size = 0;
-  space = free_list()->FindNodeIn(FreeList::kHuge, &node_size);
-  if (space == nullptr)
-    space = free_list()->FindNodeIn(FreeList::kLarge, &node_size);
-  if (space == nullptr)
-    space = free_list()->FindNodeIn(FreeList::kMedium, &node_size);
-  if (space == nullptr)
-    space = free_list()->FindNodeIn(FreeList::kSmall, &node_size);
-  if (space != nullptr) {
-    accounting_stats_.AllocateBytes(node_size);
-  }
-  return space;
-}
-
-
-void PagedSpace::DivideMemory(CompactionSpaceCollection** other, int num,
-                              intptr_t limit) {
-  CHECK(num > 0);
-  CHECK(other != nullptr);
-
-  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
-
-  EmptyAllocationInfo();
-
-  int index = 0;
-  FreeSpace* node = nullptr;
-  for (CompactionSpace* space = other[index]->Get(identity());
-       ((node = TryRemoveMemory()) != nullptr) &&
-       (space->free_list()->available() < limit);
-       space = other[++index % num]->Get(identity())) {
-    CHECK(space->identity() == identity());
-    space->AddMemory(node->address(), node->size());
-  }
-}
-
-
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
@@ -2294,62 +2256,39 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
-void FreeList::UpdateFragmentationStats(FreeListCategoryType category,
-                                        Address address, int size) {
-  Page* page = Page::FromAddress(address);
-  switch (category) {
-    case kSmall:
-      page->add_available_in_small_free_list(size);
-      break;
-    case kMedium:
-      page->add_available_in_medium_free_list(size);
-      break;
-    case kLarge:
-      page->add_available_in_large_free_list(size);
-      break;
-    case kHuge:
-      page->add_available_in_huge_free_list(size);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
-  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
-  if (node != nullptr) {
-    UpdateFragmentationStats(category, node->address(), -(*node_size));
-    DCHECK(IsVeryLong() || available() == SumFreeLists());
-  }
-  return node;
-}
-
-
 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeSpace* node = NULL;
   Page* page = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
-    node = FindNodeIn(kSmall, node_size);
-    if (node != nullptr) {
-      DCHECK(size_in_bytes <= node->size());
+    node = small_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_small_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
       return node;
     }
   }
 
   if (size_in_bytes <= kMediumAllocationMax) {
-    node = FindNodeIn(kMedium, node_size);
-    if (node != nullptr) {
-      DCHECK(size_in_bytes <= node->size());
+    node = medium_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_medium_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
       return node;
     }
   }
 
   if (size_in_bytes <= kLargeAllocationMax) {
-    node = FindNodeIn(kLarge, node_size);
-    if (node != nullptr) {
-      DCHECK(size_in_bytes <= node->size());
+    node = large_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_large_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
       return node;
     }
   }
@@ -2605,6 +2544,7 @@ intptr_t PagedSpace::SizeOfObjects() {
          (unswept_free_bytes_ == 0));
   const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
   DCHECK_GE(size, 0);
+  USE(size);
   return size;
 }
 
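The FindNodeFor change above reverts to open-coded probing of the per-size-class lists (small, medium, large) that the reverted patch had factored into FindNodeIn: each class is tried only if the request fits within its maximum, and the first hit is returned after the page's fragmentation counters are adjusted. A generic sketch of that segregated-fit policy (free-list internals and the fragmentation bookkeeping are elided; the thresholds are placeholders, not V8's kSmallAllocationMax and friends):

#include <cstddef>

struct FreeNode {
  FreeNode* next;
  size_t size;
};

struct SegregatedFreeList {
  // Placeholder thresholds; each list only holds nodes large enough to
  // satisfy any request up to its class maximum.
  static const size_t kSmallMax = 256;
  static const size_t kMediumMax = 2048;
  static const size_t kLargeMax = 16 * 1024;

  FreeNode* small_ = nullptr;
  FreeNode* medium_ = nullptr;
  FreeNode* large_ = nullptr;

  static FreeNode* Pick(FreeNode** list, size_t* node_size) {
    FreeNode* node = *list;
    if (node != nullptr) {
      *list = node->next;
      *node_size = node->size;
    }
    return node;
  }

  // Probe size classes from small to large, like the restored FindNodeFor.
  FreeNode* FindNodeFor(size_t size_in_bytes, size_t* node_size) {
    FreeNode* node = nullptr;
    if (size_in_bytes <= kSmallMax &&
        (node = Pick(&small_, node_size)) != nullptr) {
      return node;
    }
    if (size_in_bytes <= kMediumMax &&
        (node = Pick(&medium_, node_size)) != nullptr) {
      return node;
    }
    if (size_in_bytes <= kLargeMax &&
        (node = Pick(&large_, node_size)) != nullptr) {
      return node;
    }
    return nullptr;  // A real implementation would also search a huge list.
  }
};
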
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index cdfb6e6..2cea066 100644
@@ -19,7 +19,6 @@
 namespace v8 {
 namespace internal {
 
-class CompactionSpaceCollection;
 class Isolate;
 
 // -----------------------------------------------------------------------------
@@ -1421,11 +1420,19 @@ class AllocationInfo {
 
 
 // An abstraction of the accounting statistics of a page-structured space.
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
+// including page bookkeeping structures) currently in the space. The 'size'
+// of a space is the number of allocated bytes, the 'waste' in the space is
+// the number of bytes that are not allocated and not available to
+// allocation without reorganizing the space via a GC (e.g. small blocks due
+// to internal fragmentation, top of page areas in map space), and the bytes
+// 'available' is the number of unallocated bytes that are not waste.  The
+// capacity is the sum of size, waste, and available.
 //
 // The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in conjunction
-// with capacity, or else they always balance increases and decreases to the
-// non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in
+// conjunction with capacity, or else they always balance increases and
+// decreases to the non-capacity stats.
 class AllocationStats BASE_EMBEDDED {
  public:
   AllocationStats() { Clear(); }
@@ -1436,7 +1443,6 @@ class AllocationStats BASE_EMBEDDED {
     max_capacity_ = 0;
     size_ = 0;
     waste_ = 0;
-    borrowed_ = 0;
   }
 
   void ClearSizeWaste() {
@@ -1456,7 +1462,6 @@ class AllocationStats BASE_EMBEDDED {
   intptr_t MaxCapacity() { return max_capacity_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
-  intptr_t Borrowed() { return borrowed_; }
 
   // Grow the space by adding available bytes.  They are initially marked as
   // being in use (part of the size), but will normally be immediately freed,
@@ -1474,19 +1479,15 @@ class AllocationStats BASE_EMBEDDED {
   // during sweeping, bytes have been marked as being in use (part of the size)
   // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
-    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    DCHECK_GE(size_, 0);
-    DCHECK_GE(capacity_, 0);
+    DCHECK(size_ >= 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
-    DCHECK_GE(size_in_bytes, 0);
     size_ += size_in_bytes;
-    DCHECK_GE(size_, 0);
-    DCHECK_LE(size_, capacity_);
+    DCHECK(size_ >= 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
@@ -1503,60 +1504,26 @@ class AllocationStats BASE_EMBEDDED {
 
   // Merge {other} into {this}.
   void Merge(const AllocationStats& other) {
-    DCHECK_GE(other.capacity_, 0);
-    DCHECK_GE(other.size_, 0);
-    DCHECK_GE(other.waste_, 0);
     capacity_ += other.capacity_;
     size_ += other.size_;
-    // See description of |borrowed_| below why we need to remove it from
-    // |capacity_| as well as |size_|.
-    capacity_ -= other.borrowed_;
-    size_ -= other.borrowed_;
     waste_ += other.waste_;
-    if (capacity_ > max_capacity_) {
-      max_capacity_ = capacity_;
+    if (other.max_capacity_ > max_capacity_) {
+      max_capacity_ = other.max_capacity_;
     }
   }
 
   void DecreaseCapacity(intptr_t size_in_bytes) {
-    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
-    DCHECK_GE(capacity_, size_);
+    DCHECK_GE(capacity_, 0);
   }
 
-  void IncreaseCapacity(intptr_t size_in_bytes) {
-    DCHECK_GE(size_in_bytes, 0);
-    capacity_ += size_in_bytes;
-  }
-
-  void BorrowMemory(intptr_t size_in_bytes) {
-    DCHECK_GE(size_in_bytes, 0);
-    borrowed_ += size_in_bytes;
-  }
+  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
 
  private:
-  // |capacity_| is the number of object-area bytes (i.e., not including page
-  // bookkeeping structures) currently in the space.
   intptr_t capacity_;
-
-  // |max_capacity_| is the maximum |capacity_| ever observed by a space.
   intptr_t max_capacity_;
-
-  // |size_| is the number of allocated bytes.
   intptr_t size_;
-
-  // |waste_| is the number of bytes that are not allocated and not available
-  // to allocation without reorganizing the space via a GC (e.g. small blocks
-  // due to internal fragmentation, top of page areas in map space).
   intptr_t waste_;
-
-  // |borrowed_| denotes the number of bytes that are currently borrowed in this
-  // space, i.e., they have been accounted as allocated in another space, but
-  // have been moved over (e.g. through a free list) to the current space.
-  // Note that accounting them as allocated results in them being included
-  // in |size_| as well as |capacity_| of the original space. The temporary
-  // double-accounting is fixed upon merging accounting stats.
-  intptr_t borrowed_;
 };
 
 
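The class comment restored in this hunk pins down an invariant that the Allocate/Free/Merge methods are meant to preserve: capacity is always the sum of size, waste, and available bytes. A toy check with made-up numbers (illustrative only, not real V8 values):

#include <cassert>
#include <cstdint>

int main() {
  // Start: 1 MB capacity, all of it available.
  int64_t capacity = 1024 * 1024, size = 0, waste = 0;
  // AllocateBytes: available -> size (capacity unchanged).
  size += 700 * 1024;
  // Waste: bytes neither allocated nor allocatable without a GC.
  waste += 40 * 1024;
  // 'available' is derived: capacity - size - waste.
  int64_t available = capacity - size - waste;
  assert(available == 284 * 1024);  // 1024 - 700 - 40 = 284 KB still usable.
  return 0;
}
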
@@ -1715,8 +1682,6 @@ class FreeList {
   PagedSpace* owner() { return owner_; }
 
  private:
-  enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
-
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
@@ -1730,27 +1695,6 @@ class FreeList {
   static const int kLargeAllocationMax = kMediumListMax;
 
   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
-  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
-
-  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
-    switch (category) {
-      case kSmall:
-        return &small_list_;
-      case kMedium:
-        return &medium_list_;
-      case kLarge:
-        return &large_list_;
-      case kHuge:
-        return &huge_list_;
-      default:
-        UNREACHABLE();
-    }
-    UNREACHABLE();
-    return nullptr;
-  }
-
-  void UpdateFragmentationStats(FreeListCategoryType category, Address address,
-                                int size);
 
   PagedSpace* owner_;
   Heap* heap_;
@@ -1759,8 +1703,6 @@ class FreeList {
   FreeListCategory large_list_;
   FreeListCategory huge_list_;
 
-  friend class PagedSpace;
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
 
@@ -2043,25 +1985,7 @@ class PagedSpace : public Space {
 
   virtual bool is_local() { return false; }
 
-  // Divide {this} free lists up among {other} CompactionSpaceCollections
-  // up to some certain {limit} of bytes. Note that this operation eventually
-  // needs to iterate over nodes one-by-one, making it a potentially slow
-  // operation.
-  void DivideMemory(CompactionSpaceCollection** other, int num, intptr_t limit);
-
  protected:
-  // Adds memory starting at {start} of {size_in_bytes} to the space.
-  void AddMemory(Address start, int size_in_bytes) {
-    IncreaseCapacity(size_in_bytes);
-    accounting_stats_.BorrowMemory(size_in_bytes);
-    Free(start, size_in_bytes);
-  }
-
-  // Tries to remove some memory from {this} free lists. We try to remove
-  // as much memory as possible, i.e., we check the free lists from huge
-  // to small.
-  FreeSpace* TryRemoveMemory();
-
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }
@@ -2817,6 +2741,12 @@ class CompactionSpace : public PagedSpace {
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}
 
+  // Adds external memory starting at {start} of {size_in_bytes} to the space.
+  void AddExternalMemory(Address start, int size_in_bytes) {
+    IncreaseCapacity(size_in_bytes);
+    Free(start, size_in_bytes);
+  }
+
   virtual bool is_local() { return true; }
 
  protected:
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index 7e68415..a744bb7 100644
@@ -458,8 +458,8 @@ TEST(CompactionSpaceUsingExternalMemory) {
   CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_scope(isolate, allocator);
 
-  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
-  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
+  CompactionSpace* compaction_space =
+      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
   CHECK(compaction_space != NULL);
   CHECK(compaction_space->SetUp());
 
@@ -498,11 +498,17 @@ TEST(CompactionSpaceUsingExternalMemory) {
   // We expect two pages to be reachable from old_space in the end.
   const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
 
+  Object* chunk =
+      old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
   CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
+  CHECK(chunk != nullptr);
+  CHECK(chunk->IsHeapObject());
+
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), 0);
   // Make the rest of memory available for compaction.
-  old_space->DivideMemory(&collection, 1, rest);
+  compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
+                                      static_cast<int>(rest));
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), rest);
   while (num_rest_objects-- > 0) {
@@ -519,7 +525,7 @@ TEST(CompactionSpaceUsingExternalMemory) {
   old_space->MergeCompactionSpace(compaction_space);
   CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
 
-  delete collection;
+  delete compaction_space;
   delete old_space;
 
   allocator->TearDown();