private:
// v8::Task overrides.
virtual void Run() V8_OVERRIDE {
- heap_->mark_compact_collector()->SweepInParallel(space_);
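+ // Pass 0 for required_freed_bytes: background sweeping should process all
+ // unswept pages of the space and never take the early exit.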
+ heap_->mark_compact_collector()->SweepInParallel(space_, 0);
heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
}
switch (space->identity()) {
case OLD_DATA_SPACE:
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+ SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
break;
case OLD_POINTER_SPACE:
SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
FreeList* free_list,
Address start,
int size) {
- if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
+ if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
return space->Free(start, size);
} else {
return size - free_list->Free(start, size);
// Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
+// SWEEP_ON_MAIN_THREAD mode.
template intptr_t MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
+ SweepConservatively<MarkCompactCollector::SWEEP_ON_MAIN_THREAD>(
PagedSpace*, FreeList*, Page*);
ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
free_list != NULL) ||
- (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
+ (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD &&
free_list == NULL));
// When parallel sweeping is active, the page will be marked after
// sweeping by the main thread.
- if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ if (mode == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+ p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+ } else {
p->MarkSweptConservatively();
}
intptr_t freed_bytes = 0;
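+ // Track the largest contiguous block freed on this page; SweepInParallel
+ // compares it against the number of bytes a pending allocation needs.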
+ intptr_t max_freed_bytes = 0;
size_t size = 0;
// Skip over all the dead objects at the start of the page and mark them free.
if (it.Done()) {
size = p->area_end() - p->area_start();
- freed_bytes += Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
+ freed_bytes = Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
ASSERT_EQ(0, p->LiveBytes());
return freed_bytes;
}
Address free_end = StartOfLiveObject(cell_base, *cell);
// Free the first free space.
size = free_end - p->area_start();
- freed_bytes += Free<mode>(space, free_list, p->area_start(),
- static_cast<int>(size));
+ freed_bytes = Free<mode>(space, free_list, p->area_start(),
+ static_cast<int>(size));
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
// The start of the current free area is represented in undigested form by
// the address of the last 32-word section that contained a live object and
// so now we need to find the start of the first live object at the
// end of the free space.
free_end = StartOfLiveObject(cell_base, *cell);
- freed_bytes += Free<mode>(space, free_list, free_start,
- static_cast<int>(free_end - free_start));
+ freed_bytes = Free<mode>(space, free_list, free_start,
+ static_cast<int>(free_end - free_start));
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
}
// Update our undigested record of where the current free area started.
// Handle the free space at the end of the page.
if (cell_base - free_start > 32 * kPointerSize) {
free_start = DigestFreeStart(free_start, free_start_cell);
- freed_bytes += Free<mode>(space, free_list, free_start,
- static_cast<int>(p->area_end() - free_start));
+ freed_bytes = Free<mode>(space, free_list, free_start,
+ static_cast<int>(p->area_end() - free_start));
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
p->ResetLiveBytes();
- return freed_bytes;
+ return max_freed_bytes;
}
-void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
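+// Sweeps unswept pages of |space| on the calling thread. A positive
+// |required_freed_bytes| makes sweeping stop as soon as a page yields a
+// contiguous block of at least that many bytes; 0 sweeps every unswept page.
+// Returns the size of the largest contiguous block freed (for the early
+// exit, the block that satisfied the request).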
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+ int required_freed_bytes) {
PageIterator it(space);
FreeList* free_list = space == heap()->old_pointer_space()
? free_list_old_pointer_space_.get()
: free_list_old_data_space_.get();
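+ // Each page is swept into a thread-private free list, which is merged into
+ // the collector's free list for this space once the page is done.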
FreeList private_free_list(space);
+ int max_freed = 0;
+ int max_freed_overall = 0;
while (it.has_next()) {
Page* p = it.next();
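+ // Sweep only pages that this call successfully claims for parallel sweeping.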
if (p->TryParallelSweeping()) {
- SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
+ max_freed = static_cast<int>(SweepConservatively<SWEEP_IN_PARALLEL>(
+ space, &private_free_list, p));
free_list->Concatenate(&private_free_list);
- p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+ if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+ return max_freed;
+ }
+ max_freed_overall = Max(max_freed, max_freed_overall);
}
if (p == space->end_of_unswept_pages()) break;
}
+ return max_freed_overall;
}
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+ SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
pages_swept++;
break;
}
PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
reinterpret_cast<intptr_t>(p));
}
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
+ SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
pages_swept++;
parallel_sweeping_active = true;
} else {
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- // If sweeper threads are still running, wait for them.
if (collector->IsConcurrentSweepingInProgress(this)) {
+ // If sweeping is still in progress, try to sweep pages on the main thread.
+ int free_chunk = collector->SweepInParallel(this, size_in_bytes);
+ if (free_chunk >= size_in_bytes) {
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ // We should be able to allocate an object here since we just freed that
+ // much memory.
+ ASSERT(object != NULL);
+ if (object != NULL) return object;
+ }
+
+ // Wait for the sweeper threads here and complete the sweeping phase.
collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
&& heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ HeapObject* object = EnsureSweepingProgress(size_in_bytes);
if (object != NULL) return object;
}
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail there, which
// would indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return EnsureSweepingProgress(size_in_bytes);
}