// The WasSwept predicate for concurrently swept
// pages is set after sweeping all pages.
return (!is_in_old_pointer_space && !is_in_old_data_space) ||
page->WasSwept() ||
- (page->parallel_sweeping() <=
- MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
+ (mark_compact_collector()->AreSweeperThreadsActivated() &&
+ page->parallel_sweeping() <=
+ MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
}
// If IdleNotification is called with a large hint, we will wait for
// the sweeper threads here.
if (hint >= kMinHintForFullGC &&
- mark_compact_collector()->sweeping_in_progress()) {
- mark_compact_collector()->EnsureSweepingCompleted();
+ mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ mark_compact_collector()->WaitUntilSweepingCompleted();
}
return false;
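// A minimal embedder-side sketch of how this branch is reached, assuming the
// 2014-era v8::V8::IdleNotification(int hint) entry point; a hint at or above
// kMinHintForFullGC is what allows the wait on the sweeper threads above.
#include "include/v8.h"

void GiveIdleTimeToV8() {
  // IdleNotification returns true once V8 has no more idle work pending.
  while (!v8::V8::IdleNotification(1000)) {
    // Keep granting idle rounds; a large hint lets the GC finish concurrent
    // sweeping and perform a full collection.
  }
}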
ResetStepCounters();
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
}
if (state_ == SWEEPING) {
- if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+ if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
heap_->mark_compact_collector()->IsSweepingCompleted()) {
- heap_->mark_compact_collector()->EnsureSweepingCompleted();
+ heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
}
- if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
bytes_scanned_ = 0;
StartMarking(PREVENT_COMPACTION);
}
sweeper_thread_ = NULL;
if (FLAG_job_based_sweeping &&
- heap_.mark_compact_collector()->sweeping_in_progress()) {
- heap_.mark_compact_collector()->EnsureSweepingCompleted();
+ heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
}
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
- sweeping_in_progress_(false),
+ sweeping_pending_(false),
pending_sweeper_jobs_semaphore_(0),
sequential_sweeping_(false),
tracer_(NULL),
void MarkCompactCollector::StartSweeperThreads() {
ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
ASSERT(free_list_old_data_space_.get()->IsEmpty());
- sweeping_in_progress_ = true;
+ sweeping_pending_ = true;
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->StartSweeping();
}
}
-void MarkCompactCollector::EnsureSweepingCompleted() {
- ASSERT(sweeping_in_progress_ == true);
-
- // If sweeping is not completed, we try to complete it here. If we do not
- // have sweeper threads we have to complete since we do not have a good
- // indicator for a swept space in that case.
- if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
- SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
- SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
- }
-
+void MarkCompactCollector::WaitUntilSweepingCompleted() {
+ ASSERT(sweeping_pending_ == true);
for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
isolate()->sweeper_threads()[i]->WaitForSweeperThread();
}
pending_sweeper_jobs_semaphore_.Wait();
}
ParallelSweepSpacesComplete();
- sweeping_in_progress_ = false;
+ sweeping_pending_ = false;
RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
return false;
}
}
-
if (FLAG_job_based_sweeping) {
if (!pending_sweeper_jobs_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
}
pending_sweeper_jobs_semaphore_.Signal();
}
-
return true;
}
}
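// A minimal sketch of the try-wait/re-signal idiom above, assuming only the
// base::Semaphore interface already used in this patch (WaitFor taking a
// base::TimeDelta and returning bool, plus Signal):
bool JobBasedSweepingDone(base::Semaphore* semaphore) {
  // A zero timeout turns WaitFor into a non-blocking poll for the completion
  // token posted by the sweeper job.
  if (!semaphore->WaitFor(base::TimeDelta::FromSeconds(0))) return false;
  // The poll consumed the token; re-signal so a later blocking Wait() in
  // WaitUntilSweepingCompleted() still succeeds.
  semaphore->Signal();
  return true;
}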
+bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
+ return (space == NULL || space->is_swept_concurrently()) &&
+ sweeping_pending_;
+}
+
+
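// A short usage sketch (assuming a Heap* named heap is in scope): the
// zero-argument form asks about any space, while passing a space narrows
// the query to that space.
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->IsConcurrentSweepingInProgress()) {
  // Sweeper threads still own pages of at least one paged space.
}
if (collector->IsConcurrentSweepingInProgress(
        heap->paged_space(OLD_POINTER_SPACE))) {
  // Old pointer space in particular is still being swept concurrently.
}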
void Marking::TransferMark(Address old_start, Address new_start) {
// This is only used when resizing an object.
ASSERT(MemoryChunk::FromAddress(old_start) ==
ASSERT(!FLAG_never_compact || !FLAG_always_compact);
- if (sweeping_in_progress()) {
+ if (IsConcurrentSweepingInProgress()) {
// Instead of waiting, we could also abort the sweeper threads here.
- EnsureSweepingCompleted();
+ WaitUntilSweepingCompleted();
}
// Clear marking bits if incremental marking is aborted.
static_cast<int>(size));
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
ASSERT_EQ(0, p->LiveBytes());
- return free_list->GuaranteedAllocatable(max_freed_bytes);
+ return freed_bytes;
}
// Grow the size of the start-of-page free space a little to get up to the
}
p->ResetLiveBytes();
- return free_list->GuaranteedAllocatable(max_freed_bytes);
+ return max_freed_bytes;
}
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_is_iterable(sweeper == PRECISE);
+ space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
}
switch (sweeper) {
+ case CONSERVATIVE: {
+ if (FLAG_gc_verbose) {
+ PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
+ reinterpret_cast<intptr_t>(p));
+ }
+ SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
+ pages_swept++;
+ break;
+ }
case CONCURRENT_CONSERVATIVE:
case PARALLEL_CONSERVATIVE: {
if (!parallel_sweeping_active) {
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
- SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE;
- if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
- if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-
+ SweeperType how_to_sweep = CONSERVATIVE;
+ if (AreSweeperThreadsActivated()) {
+ if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
+ if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
+ }
if (sweep_precisely_) how_to_sweep = PRECISE;
MoveEvacuationCandidatesToEndOfPagesList();
}
if (how_to_sweep == PARALLEL_CONSERVATIVE) {
- EnsureSweepingCompleted();
+ WaitUntilSweepingCompleted();
}
}
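// The resulting flag-to-sweeper mapping, assuming sweep_precisely_ is false:
//   sweeper threads not activated              -> CONSERVATIVE (main thread)
//   threads activated + --parallel-sweeping    -> PARALLEL_CONSERVATIVE
//   threads activated + --concurrent-sweeping  -> CONCURRENT_CONSERVATIVE
// If both flags are set, the concurrent variant wins because it is assigned
// last; only PARALLEL_CONSERVATIVE blocks in WaitUntilSweepingCompleted()
// right after the sweeper threads are started, as shown above.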
RemoveDeadInvalidatedCode();
void EnableCodeFlushing(bool enable);
enum SweeperType {
+ CONSERVATIVE,
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PRECISE
// then the whole given space is swept.
int SweepInParallel(PagedSpace* space, int required_freed_bytes);
- void EnsureSweepingCompleted();
+ void WaitUntilSweepingCompleted();
- // If sweeper threads are not active this method will return true. If
- // this is a latency issue we should be smarter here. Otherwise, it will
- // return true if the sweeper threads are done processing the pages.
bool IsSweepingCompleted();
void RefillFreeList(PagedSpace* space);
bool AreSweeperThreadsActivated();
- // Checks if sweeping is in progress right now on any space.
- bool sweeping_in_progress() { return sweeping_in_progress_; }
+ // If a paged space is passed in, this method checks if the given space is
+ // swept concurrently. Otherwise, this method checks if concurrent sweeping
+ // is in progress right now on any space.
+ bool IsConcurrentSweepingInProgress(PagedSpace* space = NULL);
void set_sequential_sweeping(bool sequential_sweeping) {
sequential_sweeping_ = sequential_sweeping;
bool was_marked_incrementally_;
// True if concurrent or parallel sweeping is currently in progress.
- bool sweeping_in_progress_;
+ bool sweeping_pending_;
base::Semaphore pending_sweeper_jobs_semaphore_;
: Space(heap, id, executable),
free_list_(this),
is_iterable_(true),
+ is_swept_concurrently_(false),
unswept_free_bytes_(0),
end_of_unswept_pages_(NULL) {
if (id == CODE_SPACE) {
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->
+ IsConcurrentSweepingInProgress(this) || (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+HeapObject* PagedSpace::EnsureSweepingProgress(
int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
+
+ if (collector->IsConcurrentSweepingInProgress(this)) {
+ // If sweeping is still in progress, try to sweep pages on the main thread.
+ int free_chunk = collector->SweepInParallel(this, size_in_bytes);
+ if (free_chunk >= size_in_bytes) {
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ // We should be able to allocate an object here since we just freed that
+ // much memory.
+ ASSERT(object != NULL);
+ if (object != NULL) return object;
+ }
+
// Wait for the sweeper threads here and complete the sweeping phase.
- collector->EnsureSweepingCompleted();
+ collector->WaitUntilSweepingCompleted();
// After waiting for the sweeper threads, there may be new free-list
// entries.
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
+ // If sweeper threads are active, try to refill the free-lists.
MarkCompactCollector* collector = heap()->mark_compact_collector();
- // Sweeping is still in progress.
- if (collector->sweeping_in_progress()) {
- // First try to refill the free-list, concurrent sweeper threads
- // may have freed some objects in the meantime.
+ if (collector->IsConcurrentSweepingInProgress(this)) {
collector->RefillFreeList(this);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
-
- // If sweeping is still in progress try to sweep pages on the main thread.
- int free_chunk =
- collector->SweepInParallel(this, size_in_bytes);
- collector->RefillFreeList(this);
- if (free_chunk >= size_in_bytes) {
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- // We should be able to allocate an object here since we just freed that
- // much memory.
- ASSERT(object != NULL);
- if (object != NULL) return object;
- }
}
// Free list allocation failed and there is no next page. Fail if we have
&& heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ HeapObject* object = EnsureSweepingProgress(size_in_bytes);
if (object != NULL) return object;
}
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail there, which
// would indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return EnsureSweepingProgress(size_in_bytes);
}
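// A rough summary of the slow path above, based only on the hunks shown in
// this patch (steps elided by the diff are omitted):
//   1. While concurrent sweeping is in progress: RefillFreeList() and retry
//      the free-list allocation.
//   2. Near the old-generation allocation limit: EnsureSweepingProgress().
//   3. Last resort: EnsureSweepingProgress(), which first sweeps pages on
//      the main thread and otherwise blocks in WaitUntilSweepingCompleted().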
// aligned, and the size should be a non-zero multiple of the word size.
int Free(Address start, int size_in_bytes);
- // This method returns how much memory can be allocated after freeing
- // maximum_freed memory.
- int GuaranteedAllocatable(int maximum_freed) {
- if (maximum_freed < kSmallListMin) {
- return 0;
- } else if (maximum_freed <= kSmallListMax) {
- return kSmallAllocationMax;
- } else if (maximum_freed <= kMediumListMax) {
- return kMediumAllocationMax;
- } else if (maximum_freed <= kLargeListMax) {
- return kLargeAllocationMax;
- }
- return maximum_freed;
- }
-
// Allocate a block of size 'size_in_bytes' from the free list. The block
// is uninitialized. A failure is returned if no block is available. The
// number of bytes lost to fragmentation is returned in the output parameter
bool is_iterable() { return is_iterable_; }
void set_is_iterable(bool b) { is_iterable_ = b; }
+ bool is_swept_concurrently() { return is_swept_concurrently_; }
+ void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
+
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
static bool ShouldBeSweptBySweeperThreads(Page* p) {
// This space was swept precisely, hence it is iterable.
bool is_iterable_;
+ // This space is currently being swept by sweeper threads.
+ bool is_swept_concurrently_;
+
// The number of free bytes which could be reclaimed by advancing the
// concurrent sweeper threads. This is only an estimate because concurrent
// sweeping is done conservatively.
// If sweeping is still in progress, try to sweep unswept pages. If that is
// not successful, wait for the sweeper threads and retry free-list
// allocation.
- MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes);
+ MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
// Slow path of AllocateRaw. This function is space-dependent.
MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
static void SimulateIncrementalMarking() {
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
IncrementalMarking* marking = CcTest::heap()->incremental_marking();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
CHECK(marking->IsMarking() || marking->IsStopped());
if (marking->IsStopped()) {
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
// Waiting for sweeper threads should not change heap size.
- if (collector->sweeping_in_progress()) {
- collector->EnsureSweepingCompleted();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
}