Reland "[heap] Introduce parallel compaction algorithm."
author    mlippautz <mlippautz@chromium.org>
Thu, 17 Sep 2015 12:23:46 +0000 (05:23 -0700)
committer Commit bot <commit-bot@chromium.org>
Thu, 17 Sep 2015 12:23:55 +0000 (12:23 +0000)
This reverts commit 7a0a0b8b85e4cdf06795ffea01855b345776b932.

- The number of parallel tasks is still 1, i.e., we only compact on the main
  thread.
- Remove emergency memory (PagedSpace and CodeRange).
- Introduce partial compaction of pages.
- Logic for multiple tasks is in place (see the sketch below).
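
The following is a minimal, standalone C++ sketch of the scheme this patch
implements: per-task compaction spaces, a compare-and-swap state machine per
page, main-thread contribution, and sequential finalization. It is not the V8
API; the names here (CompactingState, Page, EvacuatePages, num_tasks) are
illustrative stand-ins for the types and methods touched in the diff.

  #include <atomic>
  #include <cstdio>
  #include <functional>
  #include <thread>
  #include <vector>

  enum class CompactingState { kDone, kInProgress, kFinalize, kAborted };

  // Stand-in for MemoryChunk/Page: only the parallel compaction state and the
  // bits needed to model the decisions made during evacuation.
  struct Page {
    std::atomic<CompactingState> state{CompactingState::kDone};
    bool evacuation_candidate = true;
    bool live_objects_fit = true;  // Models whether allocation succeeded.
  };

  // Each task claims pages with a CAS so no page is processed twice; a failed
  // allocation aborts the page (partial compaction) instead of OOM-ing.
  void EvacuatePages(std::vector<Page>& pages) {
    for (Page& p : pages) {
      CompactingState expected = CompactingState::kDone;
      if (!p.state.compare_exchange_strong(expected,
                                           CompactingState::kInProgress)) {
        continue;  // Another task already claimed this page.
      }
      if (!p.evacuation_candidate) {
        p.state = CompactingState::kDone;      // E.g. popular pages: skip.
      } else if (p.live_objects_fit) {
        p.state = CompactingState::kFinalize;  // Fully evacuated.
      } else {
        p.state = CompactingState::kAborted;   // Partially compacted: rescan.
      }
    }
  }

  int main() {
    std::vector<Page> pages(8);
    pages[3].live_objects_fit = false;  // Force one aborted (partial) page.

    const int num_tasks = 2;  // One background task plus the main thread.
    std::vector<std::thread> tasks;
    for (int i = 1; i < num_tasks; i++) {
      tasks.emplace_back(EvacuatePages, std::ref(pages));
    }
    EvacuatePages(pages);            // The main thread contributes as well.
    for (auto& t : tasks) t.join();  // Stands in for the semaphore wait.

    // Finalize sequentially, as EvacuatePagesInParallel does after merging
    // the per-task compaction spaces back into the owning spaces.
    int aborted = 0;
    for (Page& p : pages) {
      if (p.state == CompactingState::kAborted) aborted++;
      p.state = CompactingState::kDone;
    }
    std::printf("Aborted (at least partially) %d out of %zu page compactions\n",
                aborted, pages.size());
    return 0;
  }

In the actual patch, the per-task CompactionSpaceCollection plays the role of
the thread-local allocation target, and aborted pages keep their
evacuation-candidate flag and get RESCAN_ON_EVACUATION so their slots buffers
can still be processed.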

BUG=chromium:524425
LOG=N

Review URL: https://codereview.chromium.org/1356533002

Cr-Commit-Position: refs/heads/master@{#30796}

src/heap/mark-compact.cc
src/heap/mark-compact.h
src/heap/spaces.cc
src/heap/spaces.h

src/heap/mark-compact.cc
index ffea05a0af03299ec17868eb6f0e14390f37eacb..d51b03f64f1537bc41a0d20ad16d417ef78727ed 100644 (file)
@@ -57,7 +57,8 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       sweeping_in_progress_(false),
       parallel_compaction_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
-      pending_compaction_jobs_semaphore_(0),
+      pending_compaction_tasks_semaphore_(0),
+      concurrent_compaction_tasks_active_(0),
       evacuation_(false),
       slots_buffer_allocator_(nullptr),
       migration_slots_buffer_(nullptr),
@@ -474,21 +475,21 @@ void MarkCompactCollector::ClearMarkbits() {
 
 class MarkCompactCollector::CompactionTask : public v8::Task {
  public:
-  explicit CompactionTask(Heap* heap) : heap_(heap) {}
+  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+      : heap_(heap), spaces_(spaces) {}
 
   virtual ~CompactionTask() {}
 
  private:
   // v8::Task overrides.
   void Run() override {
-    // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
-    // called by one thread concurrently.
-    heap_->mark_compact_collector()->EvacuatePages();
+    heap_->mark_compact_collector()->EvacuatePages(spaces_);
     heap_->mark_compact_collector()
-        ->pending_compaction_jobs_semaphore_.Signal();
+        ->pending_compaction_tasks_semaphore_.Signal();
   }
 
   Heap* heap_;
+  CompactionSpaceCollection* spaces_;
 
   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
 };
@@ -3325,11 +3326,10 @@ void MarkCompactCollector::EvacuateNewSpace() {
 }
 
 
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(
+bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
     Page* p, PagedSpace* target_space) {
   AlwaysAllocateScope always_allocate(isolate());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->SetWasSwept();
 
   int offsets[16];
 
@@ -3350,17 +3350,8 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(
       HeapObject* target_object = nullptr;
       AllocationResult allocation = target_space->AllocateRaw(size, alignment);
       if (!allocation.To(&target_object)) {
-        // If allocation failed, use emergency memory and re-try allocation.
-        CHECK(target_space->HasEmergencyMemory());
-        target_space->UseEmergencyMemory();
-        allocation = target_space->AllocateRaw(size, alignment);
-      }
-      if (!allocation.To(&target_object)) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
+        return false;
       }
-
       MigrateObject(target_object, object, size, target_space->identity());
       DCHECK(object->map_word().IsForwardingAddress());
     }
@@ -3369,80 +3360,142 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(
     *cell = 0;
   }
   p->ResetLiveBytes();
+  return true;
 }
 
 
 void MarkCompactCollector::EvacuatePagesInParallel() {
+  if (evacuation_candidates_.length() == 0) return;
+
+  int num_tasks = 1;
+  if (FLAG_parallel_compaction) {
+    num_tasks = NumberOfParallelCompactionTasks();
+  }
+
+  // Set up compaction spaces.
+  CompactionSpaceCollection** compaction_spaces_for_tasks =
+      new CompactionSpaceCollection*[num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+  }
+
+  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
+      heap()->old_space());
+  compaction_spaces_for_tasks[0]
+      ->Get(CODE_SPACE)
+      ->MoveOverFreeMemory(heap()->code_space());
+
   parallel_compaction_in_progress_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
+  // Kick off parallel tasks.
+  for (int i = 1; i < num_tasks; i++) {
+    concurrent_compaction_tasks_active_++;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute on the main thread. Counter and signal are in principle not needed.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePages(compaction_spaces_for_tasks[0]);
+  pending_compaction_tasks_semaphore_.Signal();
+
+  WaitUntilCompactionCompleted();
+
+  // Merge back memory (compacted and unused) from compaction spaces.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+
+  // Finalize sequentially.
+  const int num_pages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of
+        //   slots buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kCompactingDone:
+        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+        break;
+      default:
+        // We should not observe kCompactingInProgress here.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (num_pages > 0) {
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            "  Abandoned (at least partially) %d out of %d page compactions due"
+            " to lack of memory\n",
+            abandoned_pages, num_pages);
+      } else {
+        PrintF("  Compacted %d pages\n", num_pages);
+      }
+    }
+  }
 }
 
 
 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  pending_compaction_jobs_semaphore_.Wait();
+  while (concurrent_compaction_tasks_active_-- > 0) {
+    pending_compaction_tasks_semaphore_.Wait();
+  }
   parallel_compaction_in_progress_ = false;
 }
 
 
-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  int abandoned_pages = 0;
-  for (int i = 0; i < npages; i++) {
+void MarkCompactCollector::EvacuatePages(
+    CompactionSpaceCollection* compaction_spaces) {
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
            MemoryChunk::SWEEPING_DONE);
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    // Allocate emergency memory for the case when compaction fails due to out
-    // of memory.
-    if (!space->HasEmergencyMemory()) {
-      space->CreateEmergencyMemory();  // If the OS lets us.
-    }
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page in order to free
-      // up a page.  Check that we actually got an emergency page above so we
-      // can guarantee that this succeeds.
-      if (space->HasEmergencyMemory()) {
-        EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
-        // Unlink the page from the list of pages here. We must not iterate
-        // over that page later (e.g. when scan on scavenge pages are
-        // processed). The page itself will be freed later and is still
-        // reachable from the evacuation candidates list.
-        p->Unlink();
-      } else {
-        // Without room for expansion evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_->DeallocateChain(
-              page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    if (p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+      if (p->IsEvacuationCandidate()) {
+        DCHECK_EQ(p->parallel_compaction_state().Value(),
+                  MemoryChunk::kCompactingInProgress);
+        if (EvacuateLiveObjectsFromPage(
+                p, compaction_spaces->Get(p->owner()->identity()))) {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingFinalize);
+        } else {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingAborted);
         }
-        abandoned_pages = npages - i;
-        break;
-      }
-    }
-  }
-  if (npages > 0) {
-    // Release emergency memory.
-    PagedSpaces spaces(heap());
-    for (PagedSpace* space = spaces.next(); space != NULL;
-         space = spaces.next()) {
-      if (space->HasEmergencyMemory()) {
-        space->FreeEmergencyMemory();
-      }
-    }
-    if (FLAG_trace_fragmentation) {
-      if (abandoned_pages != 0) {
-        PrintF(
-            "  Abandon %d out of %d page defragmentations due to lack of "
-            "memory\n",
-            abandoned_pages, npages);
       } else {
-        PrintF("  Defragmented %d pages\n", npages);
+        // There could be popular pages in the list of evacuation candidates
+        // which we do not compact.
+        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
       }
     }
   }
@@ -3631,12 +3684,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuationScope evacuation_scope(this);
-    if (FLAG_parallel_compaction) {
-      EvacuatePagesInParallel();
-      WaitUntilCompactionCompleted();
-    } else {
-      EvacuatePages();
-    }
+    EvacuatePagesInParallel();
   }
 
   // Second pass: find pointers to new space and update them.
@@ -3696,13 +3744,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
           PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
         }
+        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
 
         // Important: skip list should be cleared only after roots were updated
         // because root iteration traverses the stack and might have to find
         // code objects from non-updated pc pointing into evacuation candidate.
         SkipList* list = p->skip_list();
         if (list != NULL) list->Clear();
-      } else {
+      }
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                  reinterpret_cast<intptr_t>(p));
@@ -3732,6 +3782,12 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
             break;
         }
       }
+      if (p->IsEvacuationCandidate() &&
+          p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+        // Case where we've aborted compacting a page. Clear the flag here to
+        // avoid releasing the page later on.
+        p->ClearEvacuationCandidate();
+      }
     }
   }
 
@@ -3778,7 +3834,6 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
     space->ReleasePage(p);
   }
@@ -4394,10 +4449,6 @@ void MarkCompactCollector::SweepSpaces() {
 
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
-  CodeRange* code_range = heap()->isolate()->code_range();
-  if (code_range != NULL && code_range->valid()) {
-    code_range->ReserveEmergencyBlock();
-  }
 
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
src/heap/mark-compact.h
index fe01bb4e860ae646d2c387d09b2606c158c99133..3e65a0e7d500d732a4ad374ad3393ae746649044 100644 (file)
@@ -554,8 +554,11 @@ class MarkCompactCollector {
   // Synchronize sweeper threads.
   base::Semaphore pending_sweeper_jobs_semaphore_;
 
-  // Synchronize compaction threads.
-  base::Semaphore pending_compaction_jobs_semaphore_;
+  // Synchronize compaction tasks.
+  base::Semaphore pending_compaction_tasks_semaphore_;
+
+  // Number of active compaction tasks (including main thread).
+  intptr_t concurrent_compaction_tasks_active_;
 
   bool evacuation_;
 
@@ -713,12 +716,17 @@ class MarkCompactCollector {
 
   void EvacuateNewSpace();
 
-  void EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
-
-  void EvacuatePages();
+  bool EvacuateLiveObjectsFromPage(Page* p, PagedSpace* target_space);
 
+  void EvacuatePages(CompactionSpaceCollection* compaction_spaces);
   void EvacuatePagesInParallel();
 
+  int NumberOfParallelCompactionTasks() {
+    // TODO(hpayer, mlippautz): Figure out some logic to determine the number
+    // of compaction tasks.
+    return 1;
+  }
+
   void WaitUntilCompactionCompleted();
 
   void EvacuateNewSpaceAndCandidates();
src/heap/spaces.cc
index b1c9557989fd5a12cd0f5e2043572b548c362997..c74b93f52c1672417ffbf9d250ffebee234b8b76 100644 (file)
@@ -80,8 +80,7 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0),
-      emergency_block_() {}
+      current_allocation_block_index_(0) {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -140,7 +139,6 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;
 
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
-  ReserveEmergencyBlock();
   return true;
 }
 
@@ -276,24 +274,6 @@ void CodeRange::ReleaseBlock(const FreeBlock* block) {
 }
 
 
-void CodeRange::ReserveEmergencyBlock() {
-  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
-  if (emergency_block_.size == 0) {
-    ReserveBlock(requested_size, &emergency_block_);
-  } else {
-    DCHECK(emergency_block_.size >= requested_size);
-  }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
-  if (emergency_block_.size != 0) {
-    ReleaseBlock(&emergency_block_);
-    emergency_block_.size = 0;
-  }
-}
-
-
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
@@ -492,6 +472,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
   chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
   chunk->available_in_medium_free_list_ = 0;
@@ -974,8 +955,7 @@ PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
     : Space(heap, space, executable),
       free_list_(this),
       unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL),
-      emergency_memory_(NULL) {
+      end_of_unswept_pages_(NULL) {
   area_size_ = MemoryAllocator::PageAreaSize(space);
   accounting_stats_.Clear();
 
@@ -1003,30 +983,38 @@ void PagedSpace::TearDown() {
 }
 
 
+void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
+  DCHECK(identity() == other->identity());
+  // Destroy the linear allocation space of {other}. This is needed to
+  //   (a) not waste the memory and
+  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
+  other->EmptyAllocationInfo();
+
+  // Move over the free list. Concatenate makes sure that the source free list
+  // gets properly reset after moving over all nodes.
+  intptr_t freed_bytes = free_list_.Concatenate(other->free_list());
+
+  // Moved memory is not recorded as allocated memory, but rather increases and
+  // decreases capacity of the corresponding spaces. Used size and waste size
+  // are maintained by the receiving space upon allocating and freeing blocks.
+  other->accounting_stats_.DecreaseCapacity(freed_bytes);
+  accounting_stats_.IncreaseCapacity(freed_bytes);
+}
+
+
 void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
   // Unmerged fields:
   //   area_size_
   //   allocation_info_
-  //   emergency_memory_
   //   end_of_unswept_pages_
   //   unswept_free_bytes_
   //   anchor_
 
-  // It only makes sense to merge compatible spaces.
-  DCHECK(identity() == other->identity());
-
-  // Destroy the linear allocation space of {other}. This is needed to (a) not
-  // waste the memory and (b) keep the rest of the chunk in an iterable state
-  // (filler is needed).
-  int linear_size = static_cast<int>(other->limit() - other->top());
-  other->Free(other->top(), linear_size);
-
-  // Move over the free list.
-  free_list_.Concatenate(other->free_list());
+  MoveOverFreeMemory(other);
 
   // Update and clear accounting statistics.
   accounting_stats_.Merge(other->accounting_stats_);
-  other->accounting_stats_.Clear();
+  other->accounting_stats_.Reset();
 
   // Move over pages.
   PageIterator it(other);
@@ -1110,9 +1098,6 @@ bool PagedSpace::Expand() {
   if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
 
   DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
-  DCHECK(heap()->CommittedOldGenerationMemory() <=
-         heap()->MaxOldGenerationSize() +
-             PagedSpace::MaxEmergencyMemoryAllocated());
 
   p->InsertAfter(anchor_.prev_page());
 
@@ -1182,51 +1167,6 @@ void PagedSpace::ReleasePage(Page* page) {
 }
 
 
-intptr_t PagedSpace::MaxEmergencyMemoryAllocated() {
-  // New space and large object space.
-  static const int spaces_without_emergency_memory = 2;
-  static const int spaces_with_emergency_memory =
-      LAST_SPACE - FIRST_SPACE + 1 - spaces_without_emergency_memory;
-  return Page::kPageSize * spaces_with_emergency_memory;
-}
-
-
-void PagedSpace::CreateEmergencyMemory() {
-  if (identity() == CODE_SPACE) {
-    // Make the emergency block available to the allocator.
-    CodeRange* code_range = heap()->isolate()->code_range();
-    if (code_range != NULL && code_range->valid()) {
-      code_range->ReleaseEmergencyBlock();
-    }
-    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
-  }
-  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
-      AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
-  Page* page = static_cast<Page*>(emergency_memory_);
-  DCHECK(page->LiveBytes() == 0);
-  DCHECK(AreaSize() == page->area_size());
-  DCHECK(!free_list_.ContainsPageFreeListItems(page));
-  heap()->isolate()->memory_allocator()->Free(page);
-  emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
-  // Page::Initialize makes the chunk into a real page and adds it to the
-  // accounting for this space.  Unlike PagedSpace::Expand, we don't check
-  // CanExpand first, so we can go over the limits a little here.  That's OK,
-  // because we are in the process of compacting which will free up at least as
-  // much memory as it allocates.
-  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
-  page->InsertAfter(anchor_.prev_page());
-  emergency_memory_ = NULL;
-}
-
-
 #ifdef DEBUG
 void PagedSpace::Print() {}
 #endif
@@ -2133,9 +2073,10 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
   if (category->top() != NULL) {
     // This is safe (not going to deadlock) since Concatenate operations
     // are never performed on the same free lists at the same time in
-    // reverse order.
-    base::LockGuard<base::Mutex> target_lock_guard(mutex());
-    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+    // reverse order. Furthermore, we only lock if the PagedSpace containing
+    // the free list is known to be globally available, i.e., not local.
+    if (!this->owner()->owner()->is_local()) mutex()->Lock();
+    if (!category->owner()->owner()->is_local()) category->mutex()->Lock();
     DCHECK(category->end_ != NULL);
     free_bytes = category->available();
     if (end_ == NULL) {
@@ -2147,6 +2088,8 @@ intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
     base::NoBarrier_Store(&top_, category->top_);
     available_ += category->available();
     category->Reset();
+    if (!category->owner()->owner()->is_local()) category->mutex()->Unlock();
+    if (!this->owner()->owner()->is_local()) mutex()->Unlock();
   }
   return free_bytes;
 }
@@ -2254,7 +2197,13 @@ void FreeListCategory::RepairFreeList(Heap* heap) {
 }
 
 
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner)
+    : owner_(owner),
+      heap_(owner->heap()),
+      small_list_(this),
+      medium_list_(this),
+      large_list_(this),
+      huge_list_(this) {
   Reset();
 }
 
src/heap/spaces.h
index e7a0334084e8ddc166b6845bb1aef9a5670542b0..0bc1c175a2c019020ff17a1c1d724d3fb71decbf 100644 (file)
@@ -268,6 +268,19 @@ class SlotsBuffer;
 // any heap object.
 class MemoryChunk {
  public:
+  // |kCompactingDone|: Initial compaction state of a |MemoryChunk|.
+  // |kCompactingInProgress|: Parallel compaction is currently in progress.
+  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
+  //   be finalized.
+  // |kCompactingAborted|: Parallel compaction has been aborted, which should
+  //   for now only happen in OOM scenarios.
+  enum ParallelCompactingState {
+    kCompactingDone,
+    kCompactingInProgress,
+    kCompactingFinalize,
+    kCompactingAborted,
+  };
+
   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
   static MemoryChunk* FromAddress(Address a) {
     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
@@ -458,6 +471,10 @@ class MemoryChunk {
     base::Release_Store(&parallel_sweeping_, state);
   }
 
+  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
+    return parallel_compaction_;
+  }
+
   bool TryLock() { return mutex_->TryLock(); }
 
   base::Mutex* mutex() { return mutex_; }
@@ -566,6 +583,7 @@ class MemoryChunk {
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
       + kPointerSize      // base::AtomicWord parallel_sweeping_
+      + kPointerSize      // AtomicValue parallel_compaction_
       + 5 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // base::AtomicWord next_chunk_
       + kPointerSize;     // base::AtomicWord prev_chunk_
@@ -726,6 +744,7 @@ class MemoryChunk {
 
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
+  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_small_free_list_;
@@ -986,9 +1005,6 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
-  void ReserveEmergencyBlock();
-  void ReleaseEmergencyBlock();
-
  private:
   // Frees the range of virtual memory, and frees the data structures used to
   // manage it.
@@ -1031,12 +1047,6 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
-  // Emergency block guarantees that we can always allocate a page for
-  // evacuation candidates when code space is compacted. Emergency block is
-  // reserved immediately after GC and is released immedietely before
-  // allocating a page for evacuation.
-  FreeBlock emergency_block_;
-
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory.  If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -1518,6 +1528,13 @@ class AllocationStats BASE_EMBEDDED {
     }
   }
 
+  void DecreaseCapacity(intptr_t size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    DCHECK_GE(capacity_, 0);
+  }
+
+  void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
+
  private:
   intptr_t capacity_;
   intptr_t max_capacity_;
@@ -1533,7 +1550,8 @@ class AllocationStats BASE_EMBEDDED {
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+  explicit FreeListCategory(FreeList* owner)
+      : top_(0), end_(NULL), available_(0), owner_(owner) {}
 
   intptr_t Concatenate(FreeListCategory* category);
 
@@ -1573,6 +1591,8 @@ class FreeListCategory {
   int FreeListLength();
 #endif
 
+  FreeList* owner() { return owner_; }
+
  private:
   // top_ points to the top FreeSpace* in the free list category.
   base::AtomicWord top_;
@@ -1581,6 +1601,8 @@ class FreeListCategory {
 
   // Total available bytes in all blocks of this free list category.
   int available_;
+
+  FreeList* owner_;
 };
 
 
@@ -1673,6 +1695,8 @@ class FreeList {
   FreeListCategory* large_list() { return &large_list_; }
   FreeListCategory* huge_list() { return &huge_list_; }
 
+  PagedSpace* owner() { return owner_; }
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
@@ -1969,17 +1993,14 @@ class PagedSpace : public Space {
   // Return size of allocatable area on a page in this space.
   inline int AreaSize() { return area_size_; }
 
-  void CreateEmergencyMemory();
-  void FreeEmergencyMemory();
-  void UseEmergencyMemory();
-  intptr_t MaxEmergencyMemoryAllocated();
-
-  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
-
   // Merges {other} into the current space. Note that this modifies {other},
   // e.g., removes its bump pointer area and resets statistics.
   void MergeCompactionSpace(CompactionSpace* other);
 
+  void MoveOverFreeMemory(PagedSpace* other);
+
+  virtual bool is_local() { return false; }
+
  protected:
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
@@ -2040,12 +2061,6 @@ class PagedSpace : public Space {
   // end_of_unswept_pages_ page.
   Page* end_of_unswept_pages_;
 
-  // Emergency memory is the memory of a full page for a given space, allocated
-  // conservatively before evacuating a page. If compaction fails due to out
-  // of memory error the emergency memory can be used to complete compaction.
-  // If not used, the emergency memory is released after compaction.
-  MemoryChunk* emergency_memory_;
-
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
@@ -2739,12 +2754,40 @@ class CompactionSpace : public PagedSpace {
     Free(start, size_in_bytes);
   }
 
+  virtual bool is_local() { return true; }
+
  protected:
   // The space is temporary and not included in any snapshots.
   virtual bool snapshotable() { return false; }
 };
 
 
+// A collection of |CompactionSpace|s used by a single compaction task.
+class CompactionSpaceCollection : public Malloced {
+ public:
+  explicit CompactionSpaceCollection(Heap* heap)
+      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
+        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
+
+  CompactionSpace* Get(AllocationSpace space) {
+    switch (space) {
+      case OLD_SPACE:
+        return &old_space_;
+      case CODE_SPACE:
+        return &code_space_;
+      default:
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+ private:
+  CompactionSpace old_space_;
+  CompactionSpace code_space_;
+};
+
+
 // -----------------------------------------------------------------------------
 // Old object space (includes the old space of objects and code space)