Reland of "[heap] Add more tasks for parallel compaction"
author    mlippautz <mlippautz@chromium.org>
Fri, 25 Sep 2015 15:05:07 +0000 (08:05 -0700)
committer Commit bot <commit-bot@chromium.org>
Fri, 25 Sep 2015 15:05:15 +0000 (15:05 +0000)
- We now compute the number of parallel compaction tasks based on the
  evacuation candidate list, the number of cores, and a hard limit (see the
  worked example below).
- Free memory is moved over to the compaction tasks (up to some limit).
- Moving over memory is done by dividing up the free list of a given space
  among the other free lists. Since this is potentially slow, we limit the
  maximum amount of moved memory.
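
As a worked example (machine numbers are hypothetical), on 8 cores with 13
evacuation candidates and the constants introduced below
(kPagesPerCompactionTask = 4, kMaxCompactionTasks = 8):

  Min(8, Min(1 + 13 / 4, Max(1, 8 - 1))) = Min(8, Min(4, 7)) = 4

i.e., four compaction tasks, one of which runs on the main thread.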

This reverts commit bfccd5187ceb21c99feea4538e08ca7aef48b65b.

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1365743003

Cr-Commit-Position: refs/heads/master@{#30945}

src/heap/mark-compact.cc
src/heap/mark-compact.h
src/heap/spaces.cc
src/heap/spaces.h
test/cctest/test-spaces.cc

index 87e1b34f5c021acd57af14d222a5f107620a5e47..d090de066932274a5c344eaee91cf03c0d73fc6f 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
+#include "src/base/sys-info.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
@@ -572,7 +573,6 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->paged_space(OLD_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(CODE_SPACE)->ResetUnsweptFreeBytes();
   heap()->paged_space(MAP_SPACE)->ResetUnsweptFreeBytes();
-
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !evacuation()) {
     VerifyEvacuation(heap_);
@@ -593,7 +593,6 @@ bool MarkCompactCollector::IsSweepingCompleted() {
 
 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
   FreeList* free_list;
-
   if (space == heap()->old_space()) {
     free_list = free_list_old_space_.get();
   } else if (space == heap()->code_space()) {
@@ -3370,52 +3369,57 @@ bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
 }
 
 
+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+  if (!FLAG_parallel_compaction) return 1;
+  // We cap the number of parallel compaction tasks at the minimum of
+  // - the number of available cores minus one (but at least one task),
+  // - one task per kPagesPerCompactionTask evacuation candidates (plus one),
+  // - a hard limit (kMaxCompactionTasks).
+  const int kPagesPerCompactionTask = 4;
+  const int kMaxCompactionTasks = 8;
+  return Min(kMaxCompactionTasks,
+             Min(1 + evacuation_candidates_.length() / kPagesPerCompactionTask,
+                 Max(1, base::SysInfo::NumberOfProcessors() - 1)));
+}
+
+
 void MarkCompactCollector::EvacuatePagesInParallel() {
   if (evacuation_candidates_.length() == 0) return;
 
-  int num_tasks = 1;
-  if (FLAG_parallel_compaction) {
-    num_tasks = NumberOfParallelCompactionTasks();
-  }
+  const int num_tasks = NumberOfParallelCompactionTasks();
 
   // Set up compaction spaces.
-  CompactionSpaceCollection** compaction_spaces_for_tasks =
+  CompactionSpaceCollection** spaces_for_tasks =
       new CompactionSpaceCollection*[num_tasks];
   for (int i = 0; i < num_tasks; i++) {
-    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+    spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
   }
-
-  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
-      heap()->old_space());
-  compaction_spaces_for_tasks[0]
-      ->Get(CODE_SPACE)
-      ->MoveOverFreeMemory(heap()->code_space());
+  heap()->old_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
+  heap()->code_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);
 
   compaction_in_progress_ = true;
   // Kick off parallel tasks.
   for (int i = 1; i < num_tasks; i++) {
     concurrent_compaction_tasks_active_++;
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        new CompactionTask(heap(), spaces_for_tasks[i]),
         v8::Platform::kShortRunningTask);
   }
 
-  // Contribute in main thread. Counter and signal are in principal not needed.
-  concurrent_compaction_tasks_active_++;
-  EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
-  pending_compaction_tasks_semaphore_.Signal();
+  // Perform compaction on the main thread.
+  EvacuatePages(spaces_for_tasks[0], &migration_slots_buffer_);
 
   WaitUntilCompactionCompleted();
 
   // Merge back memory (compacted and unused) from compaction spaces.
   for (int i = 0; i < num_tasks; i++) {
     heap()->old_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+        spaces_for_tasks[i]->Get(OLD_SPACE));
     heap()->code_space()->MergeCompactionSpace(
-        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
-    delete compaction_spaces_for_tasks[i];
+        spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete spaces_for_tasks[i];
   }
-  delete[] compaction_spaces_for_tasks;
+  delete[] spaces_for_tasks;
 
   // Finalize sequentially.
   const int num_pages = evacuation_candidates_.length();
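
Note on the main-thread handshake: after this change only the background
tasks increment concurrent_compaction_tasks_active_ and signal
pending_compaction_tasks_semaphore_; the main thread's own evacuation via
spaces_for_tasks[0] no longer participates. WaitUntilCompactionCompleted
(not part of this diff) then presumably waits once per background task,
along the lines of this sketch:

  // Hypothetical sketch of the wait loop; the actual body is not shown here.
  while (concurrent_compaction_tasks_active_ > 0) {
    pending_compaction_tasks_semaphore_.Wait();
    concurrent_compaction_tasks_active_--;
  }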
index 6558eb2ddebba2b6349c64eb2fbf4f2c9ea40247..724650c1c4e01492669f88524cc5ee071cb1b6ae 100644 (file)
@@ -709,11 +709,8 @@ class MarkCompactCollector {
 
   void EvacuatePagesInParallel();
 
-  int NumberOfParallelCompactionTasks() {
-    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
-    // of compaction tasks.
-    return 1;
-  }
+  // The number of parallel compaction tasks, including the main thread.
+  int NumberOfParallelCompactionTasks();
 
   void WaitUntilCompactionCompleted();
 
index cd8a72951c6b3d162f783628d629fb28e93a43ae..69a37a554565e1577c601fddb9f0f11c22e0720a 100644 (file)
@@ -1014,7 +1014,7 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Reset();
+  other->accounting_stats_.Clear();
 
   // Move over pages.
   PageIterator it(other);
@@ -2213,6 +2213,44 @@ intptr_t FreeList::Concatenate(FreeList* free_list) {
 }
 
 
+FreeSpace* PagedSpace::TryRemoveMemory() {
+  FreeSpace* space = nullptr;
+  int node_size = 0;
+  space = free_list()->FindNodeIn(FreeList::kHuge, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kLarge, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kMedium, &node_size);
+  if (space == nullptr)
+    space = free_list()->FindNodeIn(FreeList::kSmall, &node_size);
+  if (space != nullptr) {
+    accounting_stats_.AllocateBytes(node_size);
+  }
+  return space;
+}
+
+
+void PagedSpace::DivideMemory(CompactionSpaceCollection** other, int num,
+                              intptr_t limit) {
+  CHECK(num > 0);
+  CHECK(other != nullptr);
+
+  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+  EmptyAllocationInfo();
+
+  int index = 0;
+  FreeSpace* node = nullptr;
+  for (CompactionSpace* space = other[index]->Get(identity());
+       ((node = TryRemoveMemory()) != nullptr) &&
+       (space->free_list()->available() < limit);
+       space = other[++index % num]->Get(identity())) {
+    CHECK(space->identity() == identity());
+    space->AddMemory(node->address(), node->size());
+  }
+}
+
+
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
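
The call sites in mark-compact.cc above use the new API as follows; dividing
walks the source free list node-by-node (largest categories first) and deals
the nodes round-robin across the target collections:

  // Stops once the source is out of nodes or the target whose turn it is
  // already holds at least |limit| bytes on its free list.
  heap()->old_space()->DivideMemory(spaces_for_tasks, num_tasks, 1 * MB);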
@@ -2256,39 +2294,62 @@ int FreeList::Free(Address start, int size_in_bytes) {
 }
 
 
+void FreeList::UpdateFragmentationStats(FreeListCategoryType category,
+                                        Address address, int size) {
+  Page* page = Page::FromAddress(address);
+  switch (category) {
+    case kSmall:
+      page->add_available_in_small_free_list(size);
+      break;
+    case kMedium:
+      page->add_available_in_medium_free_list(size);
+      break;
+    case kLarge:
+      page->add_available_in_large_free_list(size);
+      break;
+    case kHuge:
+      page->add_available_in_huge_free_list(size);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+  if (node != nullptr) {
+    UpdateFragmentationStats(category, node->address(), -(*node_size));
+    DCHECK(IsVeryLong() || available() == SumFreeLists());
+  }
+  return node;
+}
+
+
 FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
   FreeSpace* node = NULL;
   Page* page = NULL;
 
   if (size_in_bytes <= kSmallAllocationMax) {
-    node = small_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kSmall, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }
 
   if (size_in_bytes <= kMediumAllocationMax) {
-    node = medium_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kMedium, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }
 
   if (size_in_bytes <= kLargeAllocationMax) {
-    node = large_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      DCHECK(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-      DCHECK(IsVeryLong() || available() == SumFreeLists());
+    node = FindNodeIn(kLarge, node_size);
+    if (node != nullptr) {
+      DCHECK(size_in_bytes <= node->size());
       return node;
     }
   }
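
A note on the sign convention in the refactor above: FindNodeIn passes
-(*node_size) into UpdateFragmentationStats, so the page's per-category
availability counters are decremented through the same
add_available_in_*_free_list() helpers that previously appeared inline in
FindNodeFor().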
@@ -2544,7 +2605,6 @@ intptr_t PagedSpace::SizeOfObjects() {
          (unswept_free_bytes_ == 0));
   const intptr_t size = Size() - unswept_free_bytes_ - (limit() - top());
   DCHECK_GE(size, 0);
-  USE(size);
   return size;
 }
 
index 2cea06673abf136a0e3b013680e2cc6d59418e71..cdfb6e6af9a6229b5b9befc47795534de33cd166 100644 (file)
@@ -19,6 +19,7 @@
 namespace v8 {
 namespace internal {
 
+class CompactionSpaceCollection;
 class Isolate;
 
 // -----------------------------------------------------------------------------
@@ -1420,19 +1421,11 @@ class AllocationInfo {
 
 
 // An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste.  The
-// capacity is the sum of size, waste, and available.
 //
 // The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
+// functions increase or decrease one of the non-capacity stats in conjunction
+// with capacity, or else they always balance increases and decreases to the
+// non-capacity stats.
 class AllocationStats BASE_EMBEDDED {
  public:
   AllocationStats() { Clear(); }
@@ -1443,6 +1436,7 @@ class AllocationStats BASE_EMBEDDED {
     max_capacity_ = 0;
     size_ = 0;
     waste_ = 0;
+    borrowed_ = 0;
   }
 
   void ClearSizeWaste() {
@@ -1462,6 +1456,7 @@ class AllocationStats BASE_EMBEDDED {
   intptr_t MaxCapacity() { return max_capacity_; }
   intptr_t Size() { return size_; }
   intptr_t Waste() { return waste_; }
+  intptr_t Borrowed() { return borrowed_; }
 
   // Grow the space by adding available bytes.  They are initially marked as
   // being in use (part of the size), but will normally be immediately freed,
@@ -1479,15 +1474,19 @@ class AllocationStats BASE_EMBEDDED {
   // during sweeping, bytes have been marked as being in use (part of the size)
   // and are hereby freed.
   void ShrinkSpace(int size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    DCHECK(size_ >= 0);
+    DCHECK_GE(size_, 0);
+    DCHECK_GE(capacity_, 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     size_ += size_in_bytes;
-    DCHECK(size_ >= 0);
+    DCHECK_GE(size_, 0);
+    DCHECK_LE(size_, capacity_);
   }
 
   // Free allocated bytes, making them available (size -> available).
@@ -1504,26 +1503,60 @@ class AllocationStats BASE_EMBEDDED {
 
   // Merge {other} into {this}.
   void Merge(const AllocationStats& other) {
+    DCHECK_GE(other.capacity_, 0);
+    DCHECK_GE(other.size_, 0);
+    DCHECK_GE(other.waste_, 0);
     capacity_ += other.capacity_;
     size_ += other.size_;
+    // See the description of |borrowed_| below for why it must be removed
+    // from |capacity_| as well as |size_|.
+    capacity_ -= other.borrowed_;
+    size_ -= other.borrowed_;
     waste_ += other.waste_;
-    if (other.max_capacity_ > max_capacity_) {
-      max_capacity_ = other.max_capacity_;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
     }
   }
 
   void DecreaseCapacity(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
     capacity_ -= size_in_bytes;
-    DCHECK_GE(capacity_, 0);
+    DCHECK_GE(capacity_, size_);
   }
 
-  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+  void IncreaseCapacity(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
+    capacity_ += size_in_bytes;
+  }
+
+  void BorrowMemory(intptr_t size_in_bytes) {
+    DCHECK_GE(size_in_bytes, 0);
+    borrowed_ += size_in_bytes;
+  }
 
  private:
+  // |capacity_| is the number of object-area bytes (i.e., not including page
+  // bookkeeping structures) currently in the space.
   intptr_t capacity_;
+
+  // |max_capacity_| is the maximum |capacity_| ever observed by a space.
   intptr_t max_capacity_;
+
+  // |size_| is the number of allocated bytes.
   intptr_t size_;
+
+  // |waste_| is the number of bytes that are not allocated and not available
+  // to allocation without reorganizing the space via a GC (e.g. small blocks
+  // due to internal fragmentation, top of page areas in map space).
   intptr_t waste_;
+
+  // |borrowed_| denotes the number of bytes that are currently borrowed in this
+  // space, i.e., they have been accounted as allocated in another space, but
+  // have been moved over (e.g. through a free list) to the current space.
+  // Note that accounting them as allocated results in them being included
+  // in |size_| as well as |capacity_| of the original space. The temporary
+  // double-accounting is fixed upon merging accounting stats.
+  intptr_t borrowed_;
 };
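
Tracing one borrowed node of n bytes through the accounting calls visible in
this diff (a sketch; Free()'s own bookkeeping is not shown here):

  // Source space, TryRemoveMemory():  accounting_stats_.AllocateBytes(n);
  //   -> the n bytes remain counted in the source's size_ and capacity_.
  // Target space, AddMemory():        IncreaseCapacity(n); BorrowMemory(n);
  //   -> the same n bytes are now also counted in the target's capacity_
  //      and remembered in borrowed_.
  // Source space, Merge(target):      capacity_ -= target.borrowed_;
  //                                   size_     -= target.borrowed_;
  //   -> the temporary double-accounting is undone on merge.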
 
 
@@ -1682,6 +1715,8 @@ class FreeList {
   PagedSpace* owner() { return owner_; }
 
  private:
+  enum FreeListCategoryType { kSmall, kMedium, kLarge, kHuge };
+
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
@@ -1695,6 +1730,27 @@ class FreeList {
   static const int kLargeAllocationMax = kMediumListMax;
 
   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
+  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
+
+  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
+    switch (category) {
+      case kSmall:
+        return &small_list_;
+      case kMedium:
+        return &medium_list_;
+      case kLarge:
+        return &large_list_;
+      case kHuge:
+        return &huge_list_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  void UpdateFragmentationStats(FreeListCategoryType category, Address address,
+                                int size);
 
   PagedSpace* owner_;
   Heap* heap_;
@@ -1703,6 +1759,8 @@ class FreeList {
   FreeListCategory large_list_;
   FreeListCategory huge_list_;
 
+  friend class PagedSpace;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
 
@@ -1985,7 +2043,25 @@ class PagedSpace : public Space {
 
   virtual bool is_local() { return false; }
 
+  // Divides the free lists of {this} space up among the {other}
+  // CompactionSpaceCollections, up to {limit} bytes per collection. Note
+  // that this operation eventually needs to iterate over nodes one-by-one,
+  // making it a potentially slow operation.
+  void DivideMemory(CompactionSpaceCollection** other, int num, intptr_t limit);
+
  protected:
+  // Adds {size_in_bytes} bytes of memory starting at {start} to the space.
+  void AddMemory(Address start, int size_in_bytes) {
+    IncreaseCapacity(size_in_bytes);
+    accounting_stats_.BorrowMemory(size_in_bytes);
+    Free(start, size_in_bytes);
+  }
+
+  // Tries to remove a node from the free lists of {this} space. To remove
+  // as much memory as possible per call, the free lists are checked from
+  // huge down to small.
+  FreeSpace* TryRemoveMemory();
+
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }
@@ -2741,12 +2817,6 @@ class CompactionSpace : public PagedSpace {
   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
       : PagedSpace(heap, id, executable) {}
 
-  // Adds external memory starting at {start} of {size_in_bytes} to the space.
-  void AddExternalMemory(Address start, int size_in_bytes) {
-    IncreaseCapacity(size_in_bytes);
-    Free(start, size_in_bytes);
-  }
-
   virtual bool is_local() { return true; }
 
  protected:
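
The two protected helpers are used in tandem by DivideMemory(); one move
looks roughly like this (a condensed sketch using the names from this diff):

  FreeSpace* node = TryRemoveMemory();  // prefers the largest category
  if (node != nullptr) {
    CompactionSpace* target = other[index]->Get(identity());
    // Capacity grows and |borrowed_| records the bytes for the later merge.
    target->AddMemory(node->address(), node->size());
  }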
index a744bb79a7d4658acd5209158b021a4095d8c48e..7e684150e17fc38b59a895ac7da0e172824403e1 100644 (file)
@@ -458,8 +458,8 @@ TEST(CompactionSpaceUsingExternalMemory) {
   CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
   TestMemoryAllocatorScope test_scope(isolate, allocator);
 
-  CompactionSpace* compaction_space =
-      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
+  CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
+  CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
   CHECK(compaction_space != NULL);
   CHECK(compaction_space->SetUp());
 
@@ -498,17 +498,11 @@ TEST(CompactionSpaceUsingExternalMemory) {
   // We expect two pages to be reachable from old_space in the end.
   const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
 
-  Object* chunk =
-      old_space->AllocateRawUnaligned(static_cast<int>(rest)).ToObjectChecked();
   CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
-  CHECK(chunk != nullptr);
-  CHECK(chunk->IsHeapObject());
-
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), 0);
   // Make the rest of memory available for compaction.
-  compaction_space->AddExternalMemory(HeapObject::cast(chunk)->address(),
-                                      static_cast<int>(rest));
+  old_space->DivideMemory(&collection, 1, rest);
   CHECK_EQ(compaction_space->CountTotalPages(), 0);
   CHECK_EQ(compaction_space->Capacity(), rest);
   while (num_rest_objects-- > 0) {
@@ -525,7 +519,7 @@ TEST(CompactionSpaceUsingExternalMemory) {
   old_space->MergeCompactionSpace(compaction_space);
   CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
 
-  delete compaction_space;
+  delete collection;
   delete old_space;
 
   allocator->TearDown();
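
The updated test can be run in isolation through the cctest driver, e.g.
(the binary path depends on the build configuration):

  out/Release/cctest test-spaces/CompactionSpaceUsingExternalMemory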