}
-void MarkCompactCollector::FinalizeSweeping() {
- ASSERT(sweeping_pending_ == false);
- ReleaseEvacuationCandidates();
- heap()->FreeQueuedChunks();
-}
-
-
void MarkCompactCollector::MarkInParallel() {
for (int i = 0; i < FLAG_marking_threads; i++) {
heap()->isolate()->marking_threads()[i]->StartMarking();
if (IsConcurrentSweepingInProgress()) {
// Instead of waiting we could also abort the sweeper threads here.
WaitUntilSweepingCompleted();
- FinalizeSweeping();
}
// Clear marking bits if incremental marking is aborted.
slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
page->ClearEvacuationCandidate();
page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
}
return;
}
}
+void MarkCompactCollector::UnlinkEvacuationCandidates() {
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ if (!p->IsEvacuationCandidate()) continue;
+ p->Unlink();
+ p->ClearSweptPrecisely();
+ p->ClearSweptConservatively();
+ }
+}
+
+
void MarkCompactCollector::ReleaseEvacuationCandidates() {
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
p->set_scan_on_scavenge(false);
slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
- space->ReleasePage(p);
+ space->ReleasePage(p, false);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
+ heap()->FreeQueuedChunks();
}
// Adjust unswept free bytes because releasing a page expects said
// counter to be accurate for unswept pages.
space->IncreaseUnsweptFreeBytes(p);
- space->ReleasePage(p);
+ space->ReleasePage(p, true);
continue;
}
unused_page_present = true;
SweepSpace(heap()->old_pointer_space(), how_to_sweep);
SweepSpace(heap()->old_data_space(), how_to_sweep);
+ // Unlink evacuation candidates before sweeper threads access the list of
+ // pages to avoid a race condition.
+ UnlinkEvacuationCandidates();
+
if (how_to_sweep == PARALLEL_CONSERVATIVE ||
how_to_sweep == CONCURRENT_CONSERVATIVE) {
// TODO(hpayer): fix race with concurrent sweeper
// Deallocate unmarked objects and clear marked bits for marked objects.
heap_->lo_space()->FreeUnmarkedObjects();
- if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
- FinalizeSweeping();
- }
+ ReleaseEvacuationCandidates();
}
return true;
}
+
bool PagedSpace::Expand() {
if (!CanExpand()) return false;
}
-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
allocation_info_.top = allocation_info_.limit = NULL;
}
- page->Unlink();
+ if (unlink) {
+ page->Unlink();
+ }
if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
heap()->isolate()->memory_allocator()->Free(page);
} else {
if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
if (!collector->sequential_sweeping()) {
collector->WaitUntilSweepingCompleted();
- collector->FinalizeSweeping();
return true;
}
}