DEFINE_INT(max_object_groups_marking_rounds, 3,
"at most try this many times to over approximate the weak closure")
DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
+DEFINE_BOOL(parallel_compaction, false, "use parallel compaction")
DEFINE_BOOL(trace_incremental_marking, false,
"trace progress of the incremental marking")
DEFINE_BOOL(track_gc_object_stats, false,
DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(predictable, parallel_compaction)
// mark-compact.cc
DEFINE_BOOL(force_marking_deque_overflows, false,
was_marked_incrementally_(false),
sweeping_in_progress_(false),
pending_sweeper_jobs_semaphore_(0),
+ pending_compaction_jobs_semaphore_(0),
evacuation_(false),
migration_slots_buffer_(NULL),
heap_(heap),
}
+class MarkCompactCollector::CompactionTask : public v8::Task {
+ public:
+ explicit CompactionTask(Heap* heap) : heap_(heap) {}
+
+ virtual ~CompactionTask() {}
+
+ private:
+ // v8::Task overrides.
+ void Run() override {
+ // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
+ // called by one thread concurrently.
+ heap_->mark_compact_collector()->EvacuatePages();
+ heap_->mark_compact_collector()
+ ->pending_compaction_jobs_semaphore_.Signal();
+ }
+
+ Heap* heap_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
+
+
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
}
// Schedules a single CompactionTask on a background thread and returns
// immediately. The caller is responsible for waiting on
// pending_compaction_jobs_semaphore_, which the task signals once it has
// finished evacuating pages.
void MarkCompactCollector::EvacuatePagesInParallel() {
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
}
+
+
// NOTE(review): as visible in this chunk, this function re-enters itself —
// the FLAG_parallel_compaction path spawns a CompactionTask whose Run()
// calls EvacuatePages() again, and the else branch calls EvacuatePages()
// directly, i.e. unbounded recursion either way. This strongly suggests the
// chunk fused two functions during extraction: the flag branch most likely
// belongs in a caller (presumably EvacuateNewSpaceAndCandidates) while the
// real EvacuatePages body is not shown here. Confirm against the full file
// before relying on this view.
void MarkCompactCollector::EvacuatePages() {
  // NOTE(review): npages and abandoned_pages are unused in the visible
  // span — more evidence that the original body was truncated.
  int npages = evacuation_candidates_.length();
  int abandoned_pages = 0;
  GCTracer::Scope gc_scope(heap()->tracer(),
                           GCTracer::Scope::MC_EVACUATE_PAGES);
  EvacuationScope evacuation_scope(this);
  if (FLAG_parallel_compaction) {
    // Spawn a background compaction job, then block until it signals
    // completion via pending_compaction_jobs_semaphore_.
    EvacuatePagesInParallel();
    pending_compaction_jobs_semaphore_.Wait();
  } else {
    // NOTE(review): direct self-call — infinite recursion as written.
    EvacuatePages();
  }
}
// Second pass: find pointers to new space and update them.
void RemoveObjectSlots(Address start_slot, Address end_slot);
private:
+ class CompactionTask;
class SweeperTask;
explicit MarkCompactCollector(Heap* heap);
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
+ // Synchronize sweeper threads.
base::Semaphore pending_sweeper_jobs_semaphore_;
+ // Synchronize compaction threads.
+ base::Semaphore pending_compaction_jobs_semaphore_;
+
bool evacuation_;
SlotsBufferAllocator slots_buffer_allocator_;
void EvacuatePages();
+ void EvacuatePagesInParallel();
+
void EvacuateNewSpaceAndCandidates();
void ReleaseEvacuationCandidates();