// Member-initializer fragment (constructor header not visible in this
// chunk). All GC phase flags start false; both job semaphores start at 0,
// so the first Wait() blocks until a completed job signals.
compacting_(false),
was_marked_incrementally_(false),
sweeping_in_progress_(false),
+ parallel_compaction_in_progress_(false),
pending_sweeper_jobs_semaphore_(0),
pending_compaction_jobs_semaphore_(0),
evacuation_(false),
// Records a slot that now holds a migrated object's new location so later
// GC phases can update/track it: slots pointing into new space go into the
// store buffer; slots pointing at evacuation candidates go into the
// migration slots buffer. (Body truncated in this chunk.)
void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(slot);
// While a background compaction task is running, multiple threads may
// insert into the store buffer concurrently, so take the synchronized
// entry point instead of the plain (single-threaded) Mark().
+ if (parallel_compaction_in_progress_) {
+ heap_->store_buffer()->MarkSynchronized(slot);
+ } else {
+ heap_->store_buffer()->Mark(slot);
+ }
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
reinterpret_cast<Object**>(slot),
// Posts a CompactionTask to run page evacuation on a background thread.
// The in-progress flag is set BEFORE the task is posted so that any
// store-buffer insertion racing with the task (see RecordMigratedSlot)
// already observes it and takes the synchronized path.
void MarkCompactCollector::EvacuatePagesInParallel() {
+ parallel_compaction_in_progress_ = true;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompactionTask(heap()), v8::Platform::kShortRunningTask);
}
+// Blocks until the compaction-jobs semaphore is signaled (presumably by
+// the background CompactionTask when it finishes -- the signaling side is
+// not visible in this chunk), then clears the in-progress flag so
+// store-buffer insertions return to the unsynchronized fast path.
+void MarkCompactCollector::WaitUntilCompactionCompleted() {
+ pending_compaction_jobs_semaphore_.Wait();
+ parallel_compaction_in_progress_ = false;
+}
+
+
// Serially evacuates the collected evacuation-candidate pages. (Body
// truncated in this chunk.)
void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
int abandoned_pages = 0;
EvacuationScope evacuation_scope(this);
// NOTE(review): the if/else below appears spliced in from the CALLER in
// the full file (EvacuatePages() invoking itself here would recurse
// unconditionally) -- confirm against the complete source before relying
// on this chunk's layout.
if (FLAG_parallel_compaction) {
EvacuatePagesInParallel();
// Replaces the bare semaphore wait so the in-progress flag is also
// cleared once the background task completes.
- pending_compaction_jobs_semaphore_.Wait();
+ WaitUntilCompactionCompleted();
} else {
EvacuatePages();
}
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
+ // True if parallel compaction is currently in progress.
+ bool parallel_compaction_in_progress_;
+
// Synchronize sweeper threads.
base::Semaphore pending_sweeper_jobs_semaphore_;
// Posts a CompactionTask to a background thread and sets
// parallel_compaction_in_progress_.
void EvacuatePagesInParallel();
+ // Waits on pending_compaction_jobs_semaphore_ and then clears
+ // parallel_compaction_in_progress_.
+ void WaitUntilCompactionCompleted();
+
void EvacuateNewSpaceAndCandidates();
void ReleaseEvacuationCandidates();
void SetUp();
void TearDown();
- // This is used by the mutator to enter addresses into the store buffer.
+ // This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
+ // This is used to add addresses to the store buffer when multiple threads
+ // may operate on the store buffer. Presumably serialized via mutex_ --
+ // confirm in the inline definition (not visible in this chunk).
+ inline void MarkSynchronized(Address addr);
+
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters
// addresses directly into the old buffer because the GC starts by wiping the
// Second filtering hash set; cleared via ClearFilteringHashSets() --
// semantics of the two sets not visible in this chunk.
uintptr_t* hash_set_2_;
bool hash_sets_are_empty_;
+ // Used for synchronization of concurrent store buffer access.
+ base::Mutex mutex_;
+
void ClearFilteringHashSets();
bool SpaceAvailable(intptr_t space_needed);