state_(IDLE),
#endif
marking_parity_(ODD_MARKING_PARITY),
- compacting_(false),
was_marked_incrementally_(false),
- sweeping_in_progress_(false),
- parallel_compaction_in_progress_(false),
- pending_sweeper_jobs_semaphore_(0),
- pending_compaction_tasks_semaphore_(0),
- concurrent_compaction_tasks_active_(0),
evacuation_(false),
slots_buffer_allocator_(nullptr),
migration_slots_buffer_(nullptr),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(0),
code_flusher_(NULL),
- have_code_to_deoptimize_(false) {
+ have_code_to_deoptimize_(false),
+ compacting_(false),
+ sweeping_in_progress_(false),
+ compaction_in_progress_(false),
+ pending_sweeper_tasks_semaphore_(0),
+ pending_compaction_tasks_semaphore_(0),
+ concurrent_compaction_tasks_active_(0) {
}
#ifdef VERIFY_HEAP
// v8::Task overrides.
void Run() override {
heap_->mark_compact_collector()->SweepInParallel(space_, 0);
- heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
}
Heap* heap_;
}
if (heap()->concurrent_sweeping_enabled()) {
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
bool MarkCompactCollector::IsSweepingCompleted() {
- if (!pending_sweeper_jobs_semaphore_.WaitFor(
+ if (!pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
return false;
}
- pending_sweeper_jobs_semaphore_.Signal();
+ pending_sweeper_tasks_semaphore_.Signal();
return true;
}
// When parallel compaction is in progress, store and slots buffer entries
// require synchronization.
if (heap_->InNewSpace(value)) {
- if (parallel_compaction_in_progress_) {
+ if (compaction_in_progress_) {
heap_->store_buffer()->MarkSynchronized(slot);
} else {
heap_->store_buffer()->Mark(slot);
->Get(CODE_SPACE)
->MoveOverFreeMemory(heap()->code_space());
- parallel_compaction_in_progress_ = true;
+ compaction_in_progress_ = true;
// Kick off parallel tasks.
for (int i = 1; i < num_tasks; i++) {
concurrent_compaction_tasks_active_++;
pending_compaction_tasks_semaphore_.Wait();
concurrent_compaction_tasks_active_--;
}
- parallel_compaction_in_progress_ = false;
+ compaction_in_progress_ = false;
}
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- DCHECK(static_cast<int>(p->parallel_sweeping()) ==
- MemoryChunk::SWEEPING_DONE);
+ DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
+ MemoryChunk::kSweepingDone);
if (p->parallel_compaction_state().TrySetValue(
MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
if (p->IsEvacuationCandidate()) {
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
// sweeping by the main thread.
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
} else {
p->SetWasSwept();
}
int max_freed = 0;
if (page->TryLock()) {
// If this page was already swept in the meantime, we can return here.
- if (page->parallel_sweeping() != MemoryChunk::SWEEPING_PENDING) {
+ if (page->parallel_sweeping_state().Value() !=
+ MemoryChunk::kSweepingPending) {
page->mutex()->Unlock();
return 0;
}
- page->set_parallel_sweeping(MemoryChunk::SWEEPING_IN_PROGRESS);
+ page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
FreeList* free_list;
FreeList private_free_list(space);
if (space->identity() == OLD_SPACE) {
while (it.has_next()) {
Page* p = it.next();
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearWasSwept();
PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
reinterpret_cast<intptr_t>(p));
}
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
space->IncreaseUnsweptFreeBytes(p);
}
space->set_end_of_unswept_pages(p);
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+ if (p->parallel_sweeping_state().Value() ==
+ MemoryChunk::kSweepingFinalize) {
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
p->SetWasSwept();
}
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
}
}
MarkingParity marking_parity_;
- // True if we are collecting slots to perform evacuation from evacuation
- // candidates.
- bool compacting_;
-
bool was_marked_incrementally_;
- // True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
-
- // True if parallel compaction is currently in progress.
- bool parallel_compaction_in_progress_;
-
- // Synchronize sweeper threads.
- base::Semaphore pending_sweeper_jobs_semaphore_;
-
- // Synchronize compaction tasks.
- base::Semaphore pending_compaction_tasks_semaphore_;
-
- // Number of active compaction tasks (including main thread).
- intptr_t concurrent_compaction_tasks_active_;
-
bool evacuation_;
SlotsBufferAllocator* slots_buffer_allocator_;
base::SmartPointer<FreeList> free_list_code_space_;
base::SmartPointer<FreeList> free_list_map_space_;
+ // True if we are collecting slots to perform evacuation from evacuation
+ // candidates.
+ bool compacting_;
+
+ // True if concurrent or parallel sweeping is currently in progress.
+ bool sweeping_in_progress_;
+
+ // True if parallel compaction is currently in progress.
+ bool compaction_in_progress_;
+
+ // Semaphore used to synchronize sweeper tasks.
+ base::Semaphore pending_sweeper_tasks_semaphore_;
+
+ // Semaphore used to synchronize compaction tasks.
+ base::Semaphore pending_compaction_tasks_semaphore_;
+
+ // Number of active compaction tasks (including main thread).
+ intptr_t concurrent_compaction_tasks_active_;
+
friend class Heap;
};
kCompactingAborted,
};
// Sweeping state of a page, advanced by the (concurrent) sweeper.
//
// |kSweepingDone|:       Sweeping is complete, or sweeping must not be
//                        performed on this page.
// |kSweepingFinalize|:   A sweeper thread is done sweeping this page and
//                        will not touch the page memory anymore.
// |kSweepingInProgress|: This page is currently swept by a sweeper thread.
// |kSweepingPending|:    This page is ready for parallel sweeping.
enum ParallelSweepingState {
  kSweepingDone,
  kSweepingFinalize,
  kSweepingInProgress,
  kSweepingPending
};
+
// Only works if the pointer is in the first kPageSize of the MemoryChunk.
static MemoryChunk* FromAddress(Address a) {
return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
// Return all current flags.
intptr_t GetFlags() { return flags_; }
-
- // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
- // not be performed on that page.
- // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
- // not touch the page memory anymore.
- // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
- // SWEEPING_PENDING - This page is ready for parallel sweeping.
- enum ParallelSweepingState {
- SWEEPING_DONE,
- SWEEPING_FINALIZE,
- SWEEPING_IN_PROGRESS,
- SWEEPING_PENDING
- };
-
- ParallelSweepingState parallel_sweeping() {
- return static_cast<ParallelSweepingState>(
- base::Acquire_Load(¶llel_sweeping_));
- }
-
- void set_parallel_sweeping(ParallelSweepingState state) {
- base::Release_Store(¶llel_sweeping_, state);
+ AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
+ return parallel_sweeping_;
}
AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
DCHECK(SweepingCompleted());
}
- bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
+ bool SweepingCompleted() {
+ return parallel_sweeping_state().Value() <= kSweepingFinalize;
+ }
// Manage live byte count (count of bytes known to be live,
// because they are marked black).
AtomicValue<intptr_t> high_water_mark_;
base::Mutex* mutex_;
- base::AtomicWord parallel_sweeping_;
+ AtomicValue<ParallelSweepingState> parallel_sweeping_;
AtomicValue<ParallelCompactingState> parallel_compaction_;
// PagedSpace free-list statistics.