// flag-definitions.h
DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(lazy_sweeping, true,
- "Use lazy sweeping for old pointer and data spaces")
DEFINE_bool(never_compact, false,
"Never perform compaction on full GC - testing only")
DEFINE_bool(compact_code_space, true,
            "Compact code space on full non-incremental collections")
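// heap.cc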
// An incremental GC progresses as follows:
// 1. many incremental marking steps,
// 2. one old space mark-sweep-compact,
- // 3. many lazy sweep steps.
+ // 3. sweeping of the old space by concurrent sweeper threads.
// Use mark-sweep-compact events to count incremental GCs in a round.
- if (incremental_marking()->IsStopped()) {
- if (!mark_compact_collector()->AreSweeperThreadsActivated() &&
- !IsSweepingComplete() &&
- !AdvanceSweepers(static_cast<int>(step_size))) {
- return false;
- }
- }
-
if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
if (EnoughGarbageSinceLastIdleRound()) {
StartIdleRound();
}
-bool Heap::AdvanceSweepers(int step_size) {
- ASSERT(!mark_compact_collector()->AreSweeperThreadsActivated());
- bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size);
- sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size);
- return sweeping_complete;
-}
-
-
int64_t Heap::PromotedExternalMemorySize() {
if (amount_of_external_allocated_memory_
<= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
  return amount_of_external_allocated_memory_
      - amount_of_external_allocated_memory_at_last_global_gc_;
}
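// heap.h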
- bool IsSweepingComplete() {
- return !mark_compact_collector()->IsConcurrentSweepingInProgress() &&
- old_data_space()->IsLazySweepingComplete() &&
- old_pointer_space()->IsLazySweepingComplete();
- }
-
- bool AdvanceSweepers(int step_size);
-
bool EnsureSweepersProgressed(int step_size) {
  bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size);
  sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size);
  return sweeping_complete;
}
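// incremental-marking.cc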
ResetStepCounters();
- if (heap_->IsSweepingComplete()) {
+ if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
StartMarking(flag);
} else {
if (FLAG_trace_incremental_marking) {
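// mark-compact.cc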
void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
- sweeper == LAZY_CONSERVATIVE ||
sweeper == PARALLEL_CONSERVATIVE ||
sweeper == CONCURRENT_CONSERVATIVE);
space->ClearStats();
PageIterator it(space);
int pages_swept = 0;
- bool lazy_sweeping_active = false;
bool unused_page_present = false;
bool parallel_sweeping_active = false;
pages_swept++;
break;
}
- case LAZY_CONSERVATIVE: {
- if (lazy_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
- reinterpret_cast<intptr_t>(p));
- }
- space->IncreaseUnsweptFreeBytes(p);
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
- reinterpret_cast<intptr_t>(p));
- }
- SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
- pages_swept++;
- space->SetPagesToSweep(p->next_page());
- lazy_sweeping_active = true;
- }
- break;
- }
case CONCURRENT_CONSERVATIVE:
case PARALLEL_CONSERVATIVE: {
if (!parallel_sweeping_active) {
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
- SweeperType how_to_sweep =
- FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ SweeperType how_to_sweep = CONSERVATIVE;
if (AreSweeperThreadsActivated()) {
if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
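// mark-compact.h
// The available sweeping strategies, roughly: CONSERVATIVE sweeps on the
// main thread without recovering precise object boundaries;
// PARALLEL_CONSERVATIVE has sweeper threads help while the main thread
// waits for them to finish; CONCURRENT_CONSERVATIVE lets the sweeper
// threads run while the mutator executes; PRECISE frees every dead object
// exactly.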
enum SweeperType {
CONSERVATIVE,
- LAZY_CONSERVATIVE,
PARALLEL_CONSERVATIVE,
CONCURRENT_CONSERVATIVE,
PRECISE
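// spaces.cc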
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)),
unswept_free_bytes_(0) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
- // Adjust list of unswept pages if the page is the head of the list.
- if (first_unswept_page_ == page) {
- first_unswept_page_ = page->next_page();
- if (first_unswept_page_ == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- }
- }
-
if (page->WasSwept()) {
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // Stop lazy sweeping and clear marking bits for unswept pages.
- if (first_unswept_page_ != NULL) {
- Page* p = first_unswept_page_;
- do {
- // Do not use ShouldBeSweptLazily predicate here.
- // New evacuation candidates were selected but they still have
- // to be swept before collection starts.
- if (!p->WasSwept()) {
- Bitmap::Clear(p);
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
- reinterpret_cast<intptr_t>(p));
- }
- }
- p = p->next_page();
- } while (p != anchor());
- }
- first_unswept_page_ = Page::FromAddress(NULL);
+ // This counter is increased for pages that will be swept by the
+ // sweeper threads.
unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
intptr_t PagedSpace::SizeOfObjects() {
- ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0));
+ ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
+ (unswept_free_bytes_ == 0));
return Size() - unswept_free_bytes_ - (limit() - top());
}
}
-bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
- if (IsLazySweepingComplete()) return true;
-
- intptr_t freed_bytes = 0;
- Page* p = first_unswept_page_;
- do {
- Page* next_page = p->next_page();
- if (ShouldBeSweptLazily(p)) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
- reinterpret_cast<intptr_t>(p));
- }
- DecreaseUnsweptFreeBytes(p);
- freed_bytes +=
- MarkCompactCollector::
- SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
- this, NULL, p);
- }
- p = next_page;
- } while (p != anchor() && freed_bytes < bytes_to_sweep);
-
- if (p == anchor()) {
- first_unswept_page_ = Page::FromAddress(NULL);
- } else {
- first_unswept_page_ = p;
- }
-
- heap()->FreeQueuedChunks();
-
- return IsLazySweepingComplete();
-}
-
-
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
if (allocation_info_.top() >= allocation_info_.limit()) return;
}
return false;
}
- return true;
- } else {
- return AdvanceSweeper(size_in_bytes);
}
+ return true;
}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper a bounded number of times
+ // If there are unswept pages, advance sweeping a bounded number of times
// until we find a size_in_bytes contiguous piece of memory.
const int kMaxSweepingTries = 5;
bool sweeping_complete = false;
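// A minimal sketch of the bounded retry loop implied by the constants above
// (illustrative, not necessarily the verbatim implementation):
for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
  sweeping_complete = EnsureSweeperProgress(size_in_bytes);
  // Retry the free list allocation after each round of sweeper progress.
  HeapObject* object = free_list_.Allocate(size_in_bytes);
  if (object != NULL) return object;
}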
return free_list_.Allocate(size_in_bytes);
}
- // Last ditch, sweep all the remaining pages to try to find space. This may
- // cause a pause.
- if (!IsLazySweepingComplete()) {
- EnsureSweeperProgress(kMaxInt);
+ // Last ditch, sweep all the remaining pages to try to find space.
+ if (heap()->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
+ heap()->mark_compact_collector()->WaitUntilSweepingCompleted();
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
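// spaces.h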
intptr_t Available() { return free_list_.available(); }
// Allocated bytes in this space. Garbage bytes that were not found due to
- // lazy sweeping are counted as being allocated! The bytes in the current
- // linear allocation area (between top and limit) are also counted here.
+ // concurrent sweeping are counted as being allocated! The bytes in the
+ // current linear allocation area (between top and limit) are also counted
+ // here.
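+ // (SizeOfObjects() in spaces.cc compensates for both effects:
+ //  Size() - unswept_free_bytes_ - (limit() - top()).)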
virtual intptr_t Size() { return accounting_stats_.Size(); }
-// As size, but the bytes in lazily swept pages are estimated and the bytes
+// As size, but the bytes in unswept pages are estimated and the bytes
// Evacuation candidates are swept by evacuator. Needs to return a valid
// result before _and_ after evacuation has finished.
- static bool ShouldBeSweptLazily(Page* p) {
+ static bool ShouldBeSweptBySweeperThreads(Page* p) {
return !p->IsEvacuationCandidate() &&
!p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
!p->WasSweptPrecisely();
}
- void SetPagesToSweep(Page* first) {
- ASSERT(unswept_free_bytes_ == 0);
- if (first == &anchor_) first = NULL;
- first_unswept_page_ = first;
- }
-
void IncrementUnsweptFreeBytes(intptr_t by) {
unswept_free_bytes_ += by;
}
void IncreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
+ ASSERT(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
}
}
void DecreaseUnsweptFreeBytes(Page* p) {
- ASSERT(ShouldBeSweptLazily(p));
+ ASSERT(ShouldBeSweptBySweeperThreads(p));
unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
}
unswept_free_bytes_ = 0;
}
- bool AdvanceSweeper(intptr_t bytes_to_sweep);
-
- // When parallel sweeper threads are active and the main thread finished
- // its sweeping phase, this function waits for them to complete, otherwise
- // AdvanceSweeper with size_in_bytes is called.
+ // This function tries to steal size_in_bytes worth of memory from the
+ // sweeper threads' free lists. If it does not succeed in stealing enough
+ // memory, it waits for the sweeper threads to finish sweeping.
+ // It returns true when sweeping is completed and false otherwise.
bool EnsureSweeperProgress(intptr_t size_in_bytes);
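// A sketch of the control flow described above (illustrative only;
// StealMemoryFromSweeperThreads is assumed here as a helper on the
// collector, see spaces.cc for the real definition):
//
//   bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
//     MarkCompactCollector* collector = heap()->mark_compact_collector();
//     if (collector->IsConcurrentSweepingInProgress()) {
//       if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
//         collector->WaitUntilSweepingCompleted();
//         return true;
//       }
//       return false;
//     }
//     return true;
//   }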
- bool IsLazySweepingComplete() {
- return !first_unswept_page_->is_valid();
- }
-
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
bool was_swept_conservatively_;
- // The first page to be swept when the lazy sweeper advances. Is set
- // to NULL when all pages have been swept.
- Page* first_unswept_page_;
-
// The number of free bytes which could be reclaimed by advancing the
- // lazy sweeper. This is only an estimation because lazy sweeping is
- // done conservatively.
+ // concurrent sweeper threads. This is only an estimate because concurrent
+ // sweeping is done conservatively.
intptr_t unswept_free_bytes_;
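// (A page contributes p->area_size() - p->LiveBytes() to this counter;
// see IncreaseUnsweptFreeBytes above.)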
// Expands the space by allocating a fixed number of pages. Returns false if
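// store-buffer.cc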
// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space. Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
-// lazy sweeping. These dead objects may not contain pointers to new space.
-// The garbage areas that have been swept properly (these will normally be the
-// large ones) will be marked with free space and filler map words. In
+// concurrent sweeping. These dead objects may not contain pointers to new
+// space. The garbage areas that have been swept properly (these will normally
+// be the large ones) will be marked with free space and filler map words. In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler. Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
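// A sketch of the walk this comment describes (illustrative only, not the
// actual store-buffer code): advance object by object, skipping areas whose
// map word identifies them as free space or filler.
//
//   Address current = page->area_start();
//   while (current < page->area_end()) {
//     HeapObject* object = HeapObject::FromAddress(current);
//     Map* map = object->map();
//     if (map != heap->free_space_map() &&
//         map != heap->one_pointer_filler_map() &&
//         map != heap->two_pointer_filler_map()) {
//       // Visit the object's slots, looking for pointers into new space.
//     }
//     current += object->Size();
//   }

// test-heap.cc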
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(CcTest::heap()->old_pointer_space()->IsLazySweepingComplete());
+ MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
+ }
int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
{
// Allocate objects on several different old-space pages so that
- // lazy sweeping kicks in for subsequent GC runs.
+ // concurrent sweeper threads will be busy sweeping the old space on
+ // subsequent GC runs.
AlwaysAllocateScope always_allocate(CcTest::i_isolate());
int filler_size = static_cast<int>(FixedArray::SizeFor(8192));
for (int i = 1; i <= 100; i++) {
CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
- // Advancing the sweeper step-wise should not change the heap size.
- while (!CcTest::heap()->old_pointer_space()->IsLazySweepingComplete()) {
- CcTest::heap()->old_pointer_space()->AdvanceSweeper(KB);
- CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
+ // Waiting for sweeper threads should not change heap size.
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
}
+ CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
}
}
-TEST(ArrayShiftLazySweeping) {
+TEST(ArrayShiftSweeping) {
i::FLAG_expose_gc = true;
- i::FLAG_parallel_sweeping = false;
- i::FLAG_concurrent_sweeping = false;
- i::FLAG_lazy_sweeping = true;
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
Isolate* isolate = CcTest::i_isolate();
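// mjsunit regression test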
// Regression test of a very rare corner case where left-trimming an
// array caused invalid marking bit patterns on lazily swept pages.
+//
+// Lazy sweeping was deprecated. We are keeping the test case to make
+// sure that concurrent sweeping, which relies on assumptions similar to
+// those of lazy sweeping, works correctly.
// Flags: --expose-gc --noincremental-marking --max-new-space-size 1000
var head = new Array(1);
var tail = head;
- // Fill heap to increase old-space size and trigger lazy sweeping on
+ // Fill heap to increase old-space size and trigger concurrent sweeping on
// some of the old-space pages.
for (var i = 0; i < 200; i++) {
tail[1] = new Array(1000);
gc(); gc();
// At this point "array" should have been promoted to old-space and be
- // located in a lazy swept page with intact marking bits. Now shift
+ // located in a concurrently swept page with intact marking bits. Now shift
// the array to trigger left-trimming operations.
assertEquals(100, array.length);
for (var i = 0; i < 50; i++) {
// At this point "array" should have been trimmed from the left with
// marking bits being correctly transferred to the new object start.
- // Scavenging operations cause lazy sweeping to advance and verify
+ // Scavenging operations cause concurrent sweeping to advance and verify
// that marking bit patterns are still sane.
for (var i = 0; i < 200; i++) {
tail[1] = new Array(1000);