Parallel and concurrent sweeping.
author    hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Wed, 30 Jan 2013 12:19:32 +0000 (12:19 +0000)
committer hpayer@chromium.org <hpayer@chromium.org@ce2b1a6d-e550-0410-aec6-3dcde31c8c00>
          Wed, 30 Jan 2013 12:19:32 +0000 (12:19 +0000)
Sweep old pointer space and old data space in parallel, and concurrently with the main mutator thread.

BUG=

Review URL: https://codereview.chromium.org/11782028

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13552 ce2b1a6d-e550-0410-aec6-3dcde31c8c00

12 files changed:
src/flag-definitions.h
src/heap.cc
src/heap.h
src/isolate.cc
src/isolate.h
src/mark-compact.cc
src/mark-compact.h
src/spaces.cc
src/spaces.h
src/sweeper-thread.cc [new file with mode: 0644]
src/sweeper-thread.h [new file with mode: 0644]
tools/gyp/v8.gyp
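
In outline, the patch works as follows (a minimal sketch; the hypothetical names are marked below, and the real code is in the diffs underneath): during a full GC the main thread tags old-space pages for parallel sweeping instead of sweeping them, then starts the sweeper threads. Each thread claims pages with a compare-and-swap, frees dead regions into a thread-private free list, and merges that list into a shared per-thread list; the mutator later concatenates ("steals") the shared lists back into the space's own free list. With --concurrent-sweeping the mutator does not wait for the threads; with only --parallel-sweeping it blocks until they finish.

  // Sketch only: SweeperModel and SweepPageInto() are hypothetical
  // stand-ins for the patch's SweeperThread members and
  // SweepConservatively<SWEEP_IN_PARALLEL>; FreeList is simplified
  // (the real one is constructed against a PagedSpace).
  struct SweeperModel {
    FreeList private_list;  // filled page by page, without locking
    FreeList shared_list;   // drained by the mutator via StealMemory()
  };

  void SweeperLoop(PagedSpace* space, SweeperModel* m) {
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      // A compare-and-swap on the page's parallel_sweeping_ word ensures
      // each page is claimed by exactly one sweeper thread.
      if (p->TryParallelSweeping()) {
        SweepPageInto(space, &m->private_list, p);
        // Merge under the per-category mutexes; see FreeList::Concatenate.
        m->shared_list.Concatenate(&m->private_list);
      }
    }
  }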

diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0b0332a..d547706 100644
@@ -417,6 +417,10 @@ DEFINE_bool(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_bool(track_gc_object_stats, false,
             "track object counts and memory usage")
+DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping")
+DEFINE_int(sweeper_threads, 1,
+           "number of parallel and concurrent sweeping threads")
 #ifdef VERIFY_HEAP
 DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
 #endif
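
These flags are read once, at isolate initialization (see the src/isolate.cc hunk below), so a hypothetical d8 run opting in with two helper threads would look like:

  d8 --parallel-sweeping --concurrent-sweeping --sweeper-threads=2 script.js

Both boolean flags default to false, so sweeping stays sequential unless requested; V8's flag parser treats '-' and '_' in flag names interchangeably.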
diff --git a/src/heap.cc b/src/heap.cc
index 1916b64..8d398ec 100644
@@ -1308,7 +1308,8 @@ void Heap::Scavenge() {
 
   incremental_marking()->PrepareForScavenge();
 
-  AdvanceSweepers(static_cast<int>(new_space_.Size()));
+  paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size());
+  paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size());
 
   // Flip the semispaces.  After flipping, to space is empty, from space has
   // live objects.
@@ -5420,9 +5421,9 @@ bool Heap::IdleNotification(int hint) {
   // 3. many lazy sweep steps.
   // Use mark-sweep-compact events to count incremental GCs in a round.
 
-
   if (incremental_marking()->IsStopped()) {
-    if (!IsSweepingComplete() &&
+    if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
+        !IsSweepingComplete() &&
         !AdvanceSweepers(static_cast<int>(step_size))) {
       return false;
     }
diff --git a/src/heap.h b/src/heap.h
index 45698a1..4f9b672 100644
@@ -1636,6 +1636,7 @@ class Heap {
   }
 
   bool AdvanceSweepers(int step_size) {
+    ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping);
     bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
     sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
     return sweeping_complete;
diff --git a/src/isolate.cc b/src/isolate.cc
index f0eb36d..2f893ab 100644
@@ -49,6 +49,7 @@
 #include "simulator.h"
 #include "spaces.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 #include "version.h"
 #include "vm-state-inl.h"
 
@@ -1699,6 +1700,7 @@ Isolate::Isolate()
 #undef ISOLATE_INIT_ARRAY_EXECUTE
 }
 
+
 void Isolate::TearDown() {
   TRACE_ISOLATE(tear_down);
 
@@ -1734,6 +1736,14 @@ void Isolate::Deinit() {
   if (state_ == INITIALIZED) {
     TRACE_ISOLATE(deinit);
 
+    if (FLAG_concurrent_sweeping || FLAG_parallel_sweeping) {
+      for (int i = 0; i < FLAG_sweeper_threads; i++) {
+        sweeper_thread_[i]->Stop();
+        delete sweeper_thread_[i];
+      }
+      delete[] sweeper_thread_;
+    }
+
     if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
 
     if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
@@ -2103,6 +2113,17 @@ bool Isolate::Init(Deserializer* des) {
   }
 
   if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
+
+  if (FLAG_parallel_sweeping || FLAG_concurrent_sweeping) {
+    if (FLAG_sweeper_threads < 1) {
+      FLAG_sweeper_threads = 1;
+    }
+    sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads];
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      sweeper_thread_[i] = new SweeperThread(this);
+      sweeper_thread_[i]->Start();
+    }
+  }
   return true;
 }
 
diff --git a/src/isolate.h b/src/isolate.h
index 8ac8502..ffbce4f 100644
@@ -78,6 +78,7 @@ class UnicodeCache;
 class ConsStringIteratorOp;
 class StringTracker;
 class StubCache;
+class SweeperThread;
 class ThreadManager;
 class ThreadState;
 class ThreadVisitor;  // Defined in v8threads.h
@@ -1078,6 +1079,10 @@ class Isolate {
   // TODO(svenpanne) This method is on death row...
   static v8::Isolate* GetDefaultIsolateForLocking();
 
+  SweeperThread** sweeper_threads() {
+    return sweeper_thread_;
+  }
+
  private:
   Isolate();
 
@@ -1301,11 +1306,13 @@ class Isolate {
 
   DeferredHandles* deferred_handles_head_;
   OptimizingCompilerThread optimizing_compiler_thread_;
+  SweeperThread** sweeper_thread_;
 
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
   friend class IsolateInitializer;
   friend class OptimizingCompilerThread;
+  friend class SweeperThread;
   friend class ThreadManager;
   friend class Simulator;
   friend class StackGuard;
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e594940..1caa0b6 100644
@@ -40,6 +40,7 @@
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
 #include "stub-cache.h"
+#include "sweeper-thread.h"
 
 namespace v8 {
 namespace internal {
@@ -503,6 +504,42 @@ void MarkCompactCollector::ClearMarkbits() {
 }
 
 
+void MarkCompactCollector::StartSweeperThreads() {
+  SweeperThread::set_sweeping_pending(true);
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+}
+
+
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+  if (SweeperThread::sweeping_pending()) {
+    for (int i = 0; i < FLAG_sweeper_threads; i++) {
+      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+    }
+    SweeperThread::set_sweeping_pending(false);
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
+    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
+    heap()->FreeQueuedChunks();
+  }
+}
+
+
+intptr_t MarkCompactCollector::
+             StealMemoryFromSweeperThreads(PagedSpace* space) {
+  intptr_t freed_bytes = 0;
+  for (int i = 0; i < FLAG_sweeper_threads; i++) {
+    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
+  }
+  return freed_bytes;
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return heap()->isolate()->sweeper_threads() != NULL;
+}
+
+
 bool Marking::TransferMark(Address old_start, Address new_start) {
   // This is only used when resizing an object.
   ASSERT(MemoryChunk::FromAddress(old_start) ==
@@ -805,6 +842,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
 
   ASSERT(!FLAG_never_compact || !FLAG_always_compact);
 
+  if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
+    // Instead of waiting we could also abort the sweeper threads here.
+    WaitUntilSweepingCompleted();
+  }
+
   // Clear marking bits if incremental marking is aborted.
   if (was_marked_incrementally_ && abort_incremental_marking_) {
     heap()->incremental_marking()->Abort();
@@ -3131,7 +3173,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
 
         switch (space->identity()) {
           case OLD_DATA_SPACE:
-            SweepConservatively(space, p);
+            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
             break;
           case OLD_POINTER_SPACE:
             SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
@@ -3490,6 +3532,19 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
 }
 
 
+template<MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space,
+                     FreeList* free_list,
+                     Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+    return space->Free(start, size);
+  } else {
+    return size - free_list->Free(start, size);
+  }
+}
+
+
 // Sweeps a space conservatively.  After this has been done the larger free
 // spaces have been put on the free list and the smaller ones have been
 // ignored and left untouched.  A free space is always either ignored or put
@@ -3497,12 +3552,15 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
 // because it means that any FreeSpace maps left actually describe a region of
 // memory that can be ignored when scanning.  Dead objects other than free
 // spaces will not contain the free space map.
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
+template<MarkCompactCollector::SweepingParallelism mode>
+intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
+                                                   FreeList* free_list,
+                                                   Page* p) {
   ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = OS::TimeCurrentMillis();
-  }
+  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
+         free_list != NULL) ||
+         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+         free_list == NULL));
 
   MarkBit::CellType* cells = p->markbits()->cells();
   p->MarkSweptConservatively();
@@ -3530,8 +3588,7 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   }
   size_t size = block_address - p->area_start();
   if (cell_index == last_cell_index) {
-    freed_bytes += static_cast<int>(space->Free(p->area_start(),
-                                                static_cast<int>(size)));
+    freed_bytes += Free<mode>(space, free_list, p->area_start(), size);
     ASSERT_EQ(0, p->LiveBytes());
     return freed_bytes;
   }
@@ -3540,8 +3597,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
   // Free the first free space.
   size = free_end - p->area_start();
-  freed_bytes += space->Free(p->area_start(),
-                             static_cast<int>(size));
+  freed_bytes += Free<mode>(space, free_list, p->area_start(), size);
+
   // The start of the current free area is represented in undigested form by
   // the address of the last 32-word section that contained a live object and
   // the marking bitmap for that cell, which describes where the live object
@@ -3554,10 +3611,10 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   for ( ;
        cell_index < last_cell_index;
        cell_index++, block_address += 32 * kPointerSize) {
-    ASSERT(static_cast<unsigned>(cell_index) ==
-           Bitmap::IndexToCell(
-               Bitmap::CellAlignIndex(
-                   p->AddressToMarkbitIndex(block_address))));
+    ASSERT((unsigned)cell_index ==
+        Bitmap::IndexToCell(
+            Bitmap::CellAlignIndex(
+                p->AddressToMarkbitIndex(block_address))));
     uint32_t cell = cells[cell_index];
     if (cell != 0) {
       // We have a live object.  Check approximately whether it is more than 32
@@ -3570,8 +3627,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
           // so now we need to find the start of the first live object at the
           // end of the free space.
           free_end = StartOfLiveObject(block_address, cell);
-          freed_bytes += space->Free(free_start,
-                                     static_cast<int>(free_end - free_start));
+          freed_bytes += Free<mode>(space, free_list, free_start,
+                                    static_cast<int>(free_end - free_start));
         }
       }
       // Update our undigested record of where the current free area started.
@@ -3585,23 +3642,33 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) {
   // Handle the free space at the end of the page.
   if (block_address - free_start > 32 * kPointerSize) {
     free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += space->Free(free_start,
-                               static_cast<int>(block_address - free_start));
+    freed_bytes += Free<mode>(space, free_list, free_start,
+                              static_cast<int>(block_address - free_start));
   }
 
   p->ResetLiveBytes();
+  return freed_bytes;
+}
 
-  if (FLAG_print_cumulative_gc_stat) {
-    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
+
+void MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                           FreeList* private_free_list,
+                                           FreeList* free_list) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+
+    if (p->TryParallelSweeping()) {
+      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
+      free_list->Concatenate(private_free_list);
+    }
   }
-  return freed_bytes;
 }
 
 
 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                       sweeper == LAZY_CONSERVATIVE);
-
   space->ClearStats();
 
   PageIterator it(space);
@@ -3614,6 +3681,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   while (it.has_next()) {
     Page* p = it.next();
 
+    ASSERT(p->parallel_sweeping() == 0);
     // Clear sweeping flags indicating that marking bits are still intact.
     p->ClearSweptPrecisely();
     p->ClearSweptConservatively();
@@ -3659,7 +3727,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        SweepConservatively(space, p);
+        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         break;
       }
@@ -3668,12 +3736,20 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
           PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                  reinterpret_cast<intptr_t>(p));
         }
-        freed_bytes += SweepConservatively(space, p);
+        freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
         pages_swept++;
         space->SetPagesToSweep(p->next_page());
         lazy_sweeping_active = true;
         break;
       }
+      case PARALLEL_CONSERVATIVE: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        p->set_parallel_sweeping(1);
+        break;
+      }
       case PRECISE: {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
@@ -3713,11 +3789,13 @@ void MarkCompactCollector::SweepSpaces() {
       FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
   if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
   if (sweep_precisely_) how_to_sweep = PRECISE;
+  if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE;
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
   // the map space last because freeing non-live maps overwrites them and
   // the other spaces rely on possibly non-live maps to get the sizes for
   // non-live objects.
+
   SweepSpace(heap()->old_pointer_space(), how_to_sweep);
   SweepSpace(heap()->old_data_space(), how_to_sweep);
 
@@ -3728,6 +3806,15 @@ void MarkCompactCollector::SweepSpaces() {
 
   EvacuateNewSpaceAndCandidates();
 
+  if (AreSweeperThreadsActivated()) {
+    // TODO(hpayer): The sweeper threads should be started right after
+    // SweepSpace has finished with the old data space.
+    StartSweeperThreads();
+    if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
+      WaitUntilSweepingCompleted();
+    }
+  }
+
   // ClearNonLiveTransitions depends on precise sweeping of map space to
   // detect whether unmarked map became dead in this collection or in one
   // of the previous ones.
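
Worth spelling out (my reading of the templated Free() helper added above, not text from the CL): Free<SWEEP_SEQUENTIALLY> routes through PagedSpace::Free(), which both stocks the space's free list and credits its accounting stats immediately, while Free<SWEEP_IN_PARALLEL> only stocks the given private free list and returns size - free_list->Free(start, size), i.e. the bytes the list actually kept, since FreeList::Free() returns the wasted remainder. For parallel sweeping the accounting credit is deferred until the mutator drains the shared lists via SweeperThread::StealMemory(), which calls the new PagedSpace::AddToAccountingStats().
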
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 0837959..0c98016 100644
@@ -594,9 +594,15 @@ class MarkCompactCollector {
   enum SweeperType {
     CONSERVATIVE,
     LAZY_CONSERVATIVE,
+    PARALLEL_CONSERVATIVE,
     PRECISE
   };
 
+  enum SweepingParallelism {
+    SWEEP_SEQUENTIALLY,
+    SWEEP_IN_PARALLEL
+  };
+
 #ifdef VERIFY_HEAP
   void VerifyMarkbitsAreClean();
   static void VerifyMarkbitsAreClean(PagedSpace* space);
@@ -605,7 +611,10 @@ class MarkCompactCollector {
 
   // Sweep a single page from the given space conservatively.
   // Return a number of reclaimed bytes.
-  static intptr_t SweepConservatively(PagedSpace* space, Page* p);
+  template<SweepingParallelism type>
+  static intptr_t SweepConservatively(PagedSpace* space,
+                                      FreeList* free_list,
+                                      Page* p);
 
   INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
     return Page::FromAddress(reinterpret_cast<Address>(anchor))->
@@ -671,6 +680,16 @@ class MarkCompactCollector {
 
   MarkingParity marking_parity() { return marking_parity_; }
 
+  void SweepInParallel(PagedSpace* space,
+                       FreeList* private_free_list,
+                       FreeList* free_list);
+
+  void WaitUntilSweepingCompleted();
+
+  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);
+
+  bool AreSweeperThreadsActivated();
+
  private:
   MarkCompactCollector();
   ~MarkCompactCollector();
@@ -679,6 +698,7 @@ class MarkCompactCollector {
   void RemoveDeadInvalidatedCode();
   void ProcessInvalidatedCode(ObjectVisitor* visitor);
 
+  void StartSweeperThreads();
 
 #ifdef DEBUG
   enum CollectorState {
diff --git a/src/spaces.cc b/src/spaces.cc
index bf1a74e..711cde1 100644
@@ -466,6 +466,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->parallel_sweeping_ = 0;
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
@@ -2041,6 +2042,29 @@ void FreeListNode::set_next(FreeListNode* next) {
 }
 
 
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top_ != NULL) {
+    ASSERT(category->end_ != NULL);
+    // This is safe (not going to deadlock) since Concatenate operations
+    // are never performed on the same free lists at the same time in
+    // reverse order.
+    ScopedLock lock_target(mutex_);
+    ScopedLock lock_source(category->mutex());
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top_);
+    }
+    top_ = category->top();
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}
+
+
 void FreeListCategory::Reset() {
   top_ = NULL;
   end_ = NULL;
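
A note on the deadlock claim above: concatenation only ever flows in one direction, so the two locks in FreeListCategory::Concatenate are always taken in a consistent global order. A sweeper thread merges its private list into its shared list (SweepInParallel), and the mutator merges a shared list into the space's own free list (SweeperThread::StealMemory); no pair of lists is ever concatenated in both directions concurrently:

  space free list  <--  shared per-thread list  <--  private per-thread list
   (mutator, StealMemory)    (sweeper thread, SweepInParallel)
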
@@ -2139,6 +2163,16 @@ FreeList::FreeList(PagedSpace* owner)
 }
 
 
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}
+
+
 void FreeList::Reset() {
   small_list_.Reset();
   medium_list_.Reset();
@@ -2503,7 +2537,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
                reinterpret_cast<intptr_t>(p));
       }
       DecreaseUnsweptFreeBytes(p);
-      freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
+      freed_bytes +=
+          MarkCompactCollector::
+              SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+                  this, NULL, p);
     }
     p = next_page;
   } while (p != anchor() && freed_bytes < bytes_to_sweep);
@@ -2535,6 +2572,21 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
 
 
+bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->AreSweeperThreadsActivated()) {
+    if (FLAG_concurrent_sweeping &&
+        collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
+      collector->WaitUntilSweepingCompleted();
+      return true;
+    }
+    return false;
+  } else {
+    return AdvanceSweeper(size_in_bytes);
+  }
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
 
@@ -2544,7 +2596,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   bool sweeping_complete = false;
 
   for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
-    sweeping_complete = AdvanceSweeper(size_in_bytes);
+    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2567,7 +2619,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Last ditch, sweep all the remaining pages to try to find space.  This may
   // cause a pause.
   if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
+    EnsureSweeperProgress(kMaxInt);
 
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
diff --git a/src/spaces.h b/src/spaces.h
index 9df1d9e..25f907e 100644
@@ -454,6 +454,18 @@ class MemoryChunk {
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
 
+  intptr_t parallel_sweeping() const {
+    return parallel_sweeping_;
+  }
+
+  void set_parallel_sweeping(intptr_t state) {
+    parallel_sweeping_ = state;
+  }
+
+  bool TryParallelSweeping() {
+    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+  }
+
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
   void ResetLiveBytes() {
@@ -533,8 +545,8 @@ class MemoryChunk {
   static const size_t kWriteBarrierCounterOffset =
       kSlotsBufferOffset + kPointerSize + kPointerSize;
 
-  static const size_t kHeaderSize =
-      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
+  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
+                                    kIntSize + kIntSize + kPointerSize;
 
   static const int kBodyOffset =
       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -686,6 +698,8 @@ class MemoryChunk {
   // count highest number of bytes ever allocated on the page.
   int high_water_mark_;
 
+  intptr_t parallel_sweeping_;
+
   static MemoryChunk* Initialize(Heap* heap,
                                  Address base,
                                  size_t size,
@@ -1395,7 +1409,17 @@ class FreeListNode: public HeapObject {
 // the end element of the linked list of free memory blocks.
 class FreeListCategory {
  public:
-  FreeListCategory() : top_(NULL), end_(NULL), available_(0) {}
+  FreeListCategory() :
+      top_(NULL),
+      end_(NULL),
+      mutex_(OS::CreateMutex()),
+      available_(0) {}
+
+  ~FreeListCategory() {
+    delete mutex_;
+  }
+
+  intptr_t Concatenate(FreeListCategory* category);
 
   void Reset();
 
@@ -1421,6 +1445,8 @@ class FreeListCategory {
   int available() const { return available_; }
   void set_available(int available) { available_ = available; }
 
+  Mutex* mutex() { return mutex_; }
+
 #ifdef DEBUG
   intptr_t SumFreeList();
   int FreeListLength();
@@ -1429,6 +1455,7 @@ class FreeListCategory {
  private:
   FreeListNode* top_;
   FreeListNode* end_;
+  Mutex* mutex_;
 
   // Total available bytes in all blocks of this free list category.
   int available_;
@@ -1462,6 +1489,8 @@ class FreeList BASE_EMBEDDED {
  public:
   explicit FreeList(PagedSpace* owner);
 
+  intptr_t Concatenate(FreeList* free_list);
+
   // Clear the free list.
   void Reset();
 
@@ -1509,6 +1538,11 @@ class FreeList BASE_EMBEDDED {
 
   intptr_t EvictFreeListItems(Page* p);
 
+  FreeListCategory* small_list() { return &small_list_; }
+  FreeListCategory* medium_list() { return &medium_list_; }
+  FreeListCategory* large_list() { return &large_list_; }
+  FreeListCategory* huge_list() { return &huge_list_; }
+
  private:
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
@@ -1723,6 +1757,11 @@ class PagedSpace : public Space {
 
   bool AdvanceSweeper(intptr_t bytes_to_sweep);
 
+  // When parallel sweeper threads are active this function waits
+  // for them to complete, otherwise AdvanceSweeper with size_in_bytes
+  // is called.
+  bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
   bool IsSweepingComplete() {
     return !first_unswept_page_->is_valid();
   }
@@ -1747,6 +1786,12 @@ class PagedSpace : public Space {
   }
 
  protected:
+  FreeList* free_list() { return &free_list_; }
+
+  void AddToAccountingStats(intptr_t bytes) {
+    accounting_stats_.DeallocateBytes(bytes);
+  }
+
   int area_size_;
 
   // Maximum capacity of this space.
@@ -1796,6 +1841,7 @@ class PagedSpace : public Space {
   MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
+  friend class SweeperThread;
 };
 
 
diff --git a/src/sweeper-thread.cc b/src/sweeper-thread.cc
new file mode 100644
index 0000000..7e31e6c
--- /dev/null
+++ b/src/sweeper-thread.cc
@@ -0,0 +1,105 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "sweeper-thread.h"
+
+#include "v8.h"
+
+#include "isolate.h"
+#include "v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+SweeperThread::SweeperThread(Isolate* isolate)
+     : Thread("SweeperThread"),
+       isolate_(isolate),
+       heap_(isolate->heap()),
+       collector_(heap_->mark_compact_collector()),
+       start_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       end_sweeping_semaphore_(OS::CreateSemaphore(0)),
+       stop_semaphore_(OS::CreateSemaphore(0)),
+       free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)),
+       private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)),
+       private_free_list_old_pointer_space_(
+           heap_->paged_space(OLD_POINTER_SPACE)) {
+  NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+}
+
+
+bool SweeperThread::sweeping_pending_ = false;
+
+
+void SweeperThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+  while (true) {
+    start_sweeping_semaphore_->Wait();
+
+    if (Acquire_Load(&stop_thread_)) {
+      stop_semaphore_->Signal();
+      return;
+    }
+
+    collector_->SweepInParallel(heap_->old_data_space(),
+                                &private_free_list_old_data_space_,
+                                &free_list_old_data_space_);
+    collector_->SweepInParallel(heap_->old_pointer_space(),
+                                &private_free_list_old_pointer_space_,
+                                &free_list_old_pointer_space_);
+    end_sweeping_semaphore_->Signal();
+  }
+}
+
+intptr_t SweeperThread::StealMemory(PagedSpace* space) {
+  intptr_t free_bytes = 0;
+  if (space->identity() == OLD_POINTER_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_pointer_space_);
+    space->AddToAccountingStats(free_bytes);
+  } else if (space->identity() == OLD_DATA_SPACE) {
+    free_bytes = space->free_list()->Concatenate(&free_list_old_data_space_);
+    space->AddToAccountingStats(free_bytes);
+  }
+  return free_bytes;
+}
+
+void SweeperThread::Stop() {
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  start_sweeping_semaphore_->Signal();
+  stop_semaphore_->Wait();
+}
+
+
+void SweeperThread::StartSweeping() {
+  start_sweeping_semaphore_->Signal();
+}
+
+
+void SweeperThread::WaitForSweeperThread() {
+  end_sweeping_semaphore_->Wait();
+}
+} }  // namespace v8::internal
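
The Run() loop above parks on start_sweeping_semaphore_ and is driven entirely by the collector. A minimal sketch of one lifecycle, using only calls from this patch (the sequencing commentary is mine):

  SweeperThread* sweeper = new SweeperThread(isolate);
  sweeper->Start();                 // OS thread enters Run(), blocks on start_sweeping_semaphore_
  sweeper->StartSweeping();         // wake it; it sweeps both old spaces, then signals completion
  sweeper->WaitForSweeperThread();  // block on end_sweeping_semaphore_ until that signal arrives
  sweeper->Stop();                  // set stop_thread_, wake the loop, wait on stop_semaphore_
  delete sweeper;                   // ~SweeperThread() frees the three semaphores
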
diff --git a/src/sweeper-thread.h b/src/sweeper-thread.h
new file mode 100644
index 0000000..ba793c2
--- /dev/null
+++ b/src/sweeper-thread.h
@@ -0,0 +1,81 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SWEEPER_THREAD_H_
+#define V8_SWEEPER_THREAD_H_
+
+#include "atomicops.h"
+#include "flags.h"
+#include "platform.h"
+#include "v8utils.h"
+
+#include "spaces.h"
+
+#include "heap.h"
+
+namespace v8 {
+namespace internal {
+
+class SweeperThread : public Thread {
+ public:
+  explicit SweeperThread(Isolate* isolate);
+
+  void Run();
+  void Stop();
+  void StartSweeping();
+  void WaitForSweeperThread();
+  intptr_t StealMemory(PagedSpace* space);
+
+  static bool sweeping_pending() { return sweeping_pending_; }
+  static void set_sweeping_pending(bool sweeping_pending) {
+    sweeping_pending_ = sweeping_pending;
+  }
+
+  ~SweeperThread() {
+    delete start_sweeping_semaphore_;
+    delete end_sweeping_semaphore_;
+    delete stop_semaphore_;
+  }
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  MarkCompactCollector* collector_;
+  Semaphore* start_sweeping_semaphore_;
+  Semaphore* end_sweeping_semaphore_;
+  Semaphore* stop_semaphore_;
+  FreeList free_list_old_data_space_;
+  FreeList free_list_old_pointer_space_;
+  FreeList private_free_list_old_data_space_;
+  FreeList private_free_list_old_pointer_space_;
+  volatile AtomicWord stop_thread_;
+  static bool sweeping_pending_;
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_SWEEPER_THREAD_H_
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 2dda977..f15359e 100644
             '../../src/strtod.h',
             '../../src/stub-cache.cc',
             '../../src/stub-cache.h',
+            '../../src/sweeper-thread.h',
+            '../../src/sweeper-thread.cc',
             '../../src/token.cc',
             '../../src/token.h',
             '../../src/transitions-inl.h',