From 5e87a0997bad554799cdf1a07e46ecfd0af8e1fa Mon Sep 17 00:00:00 2001
From: ulan
Date: Wed, 27 May 2015 06:07:47 -0700
Subject: [PATCH] New algorithm for selecting evacuation candidates

This lifts the sqrt(n) limit on the number of evacuation candidates,
replaces the O(n * sqrt(n)) algorithm with an O(n * log(n)) algorithm,
and removes hard-coded constants.

Evacuation candidates are selected as follows:
1) Sort pages from the most free to the least free.
2) Select the first m pages as evacuation candidates such that m is as
   large as possible under the two conditions:
   - The total size of live objects in the first m pages does not exceed
     the given limit. This is based on the assumption that the evacuation
     cost is proportional to the total size of moved objects.
   - The fragmentation of the (m+1)-th page does not exceed the given
     limit.

Review URL: https://codereview.chromium.org/1038313003

Cr-Commit-Position: refs/heads/master@{#28651}
---
 src/heap/mark-compact.cc | 275 ++++++++++++---------------------------
 src/heap/spaces.cc       |   8 --
 src/heap/spaces.h        |  38 +++---
 3 files changed, 100 insertions(+), 221 deletions(-)

diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 18bb1ccc4..57361e131 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -641,136 +641,15 @@ const char* AllocationSpaceName(AllocationSpace space) {
 }
 
 
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them. Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
-  // If page was not swept then there are no free list items on it.
-  if (!p->WasSwept()) {
-    if (FLAG_trace_fragmentation_verbose) {
-      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
-             AllocationSpaceName(space->identity()), p->LiveBytes());
-    }
-    return FLAG_always_compact ? 1 : 0;
-  }
-
-  PagedSpace::SizeStats sizes;
-  space->ObtainFreeListStatistics(p, &sizes);
-
-  intptr_t ratio;
-  intptr_t ratio_threshold;
-  intptr_t area_size = space->AreaSize();
-  if (space->identity() == CODE_SPACE) {
-    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
-    ratio_threshold = 10;
-  } else {
-    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
-    ratio_threshold = 15;
-  }
-
-  if (FLAG_trace_fragmentation_verbose) {
-    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-           reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
-           static_cast<int>(sizes.small_size_),
-           static_cast<double>(sizes.small_size_ * 100) / area_size,
-           static_cast<int>(sizes.medium_size_),
-           static_cast<double>(sizes.medium_size_ * 100) / area_size,
-           static_cast<int>(sizes.large_size_),
-           static_cast<double>(sizes.large_size_ * 100) / area_size,
-           static_cast<int>(sizes.huge_size_),
-           static_cast<double>(sizes.huge_size_ * 100) / area_size,
-           (ratio > ratio_threshold) ? "[fragmented]" : "");
-  }
-
-  if (FLAG_always_compact && sizes.Total() != area_size) {
-    return 1;
-  }
-
-  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
-
-  return static_cast<int>(ratio - ratio_threshold);
-}
-
-
 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
 
-  static const int kMaxMaxEvacuationCandidates = 1000;
   int number_of_pages = space->CountTotalPages();
-  int max_evacuation_candidates =
-      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
-
-  if (FLAG_stress_compaction || FLAG_always_compact) {
-    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
-  }
-
-  class Candidate {
-   public:
-    Candidate() : fragmentation_(0), page_(NULL) {}
-    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
-
-    int fragmentation() { return fragmentation_; }
-    Page* page() { return page_; }
-
-   private:
-    int fragmentation_;
-    Page* page_;
-  };
-
-  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
-
-  CompactionMode mode = COMPACT_FREE_LISTS;
-
-  intptr_t reserved = number_of_pages * space->AreaSize();
-  intptr_t over_reserved = reserved - space->SizeOfObjects();
-  static const intptr_t kFreenessThreshold = 50;
-
-  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
-    // If reduction of memory footprint was requested, we are aggressive
-    // about choosing pages to free. We expect that half-empty pages
-    // are easier to compact so slightly bump the limit.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates += 2;
-  }
-
-
-  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
-    // If over-usage is very high (more than a third of the space), we
-    // try to free all mostly empty pages. We expect that almost empty
-    // pages are even easier to compact so bump the limit even more.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates *= 2;
-  }
-
-  if (FLAG_always_compact) {
-    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
-  }
-
-  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
-    PrintF(
-        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
-        "evacuation candidate limit: %d\n",
-        static_cast<double>(over_reserved) / MB,
-        static_cast<double>(reserved) / MB,
-        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
-  }
-
-  intptr_t estimated_release = 0;
-
-  if (FLAG_trace_fragmentation &&
-      max_evacuation_candidates >= kMaxMaxEvacuationCandidates) {
-    PrintF("Hit max page compaction limit of %d pages\n",
-           kMaxMaxEvacuationCandidates);
-  }
-  max_evacuation_candidates =
-      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+  int area_size = space->AreaSize();
 
-  std::vector<Candidate> candidates(max_evacuation_candidates);
-
-  int count = 0;
-  int fragmentation = 0;
-  int page_number = 0;
-  int least_index = -1;
+  // Pairs of (live_bytes_in_page, page).
+  std::vector<std::pair<int, Page*> > pages;
+  pages.reserve(number_of_pages);
 
   PageIterator it(space);
   while (it.has_next()) {
@@ -781,88 +660,102 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       p->ClearFlag(Page::POPULAR_PAGE);
       continue;
     }
-
     // Invariant: Evacuation candidates are just created when marking is
     // started. At the end of a GC all evacuation candidates are cleared and
     // their slot buffers are released.
     CHECK(!p->IsEvacuationCandidate());
     CHECK(p->slots_buffer() == NULL);
-
-    if (FLAG_stress_compaction) {
-      if (FLAG_manual_evacuation_candidates_selection) {
-        if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
-          p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
-          fragmentation = 1;
-        }
-      } else {
-        unsigned int counter = space->heap()->ms_count();
-        if ((counter & 1) == (page_number & 1)) fragmentation = 1;
-        page_number++;
-      }
-    } else if (mode == REDUCE_MEMORY_FOOTPRINT && !FLAG_always_compact) {
-      // Don't try to release too many pages.
-      if (estimated_release >= over_reserved) {
-        continue;
+    DCHECK(p->area_size() == area_size);
+    int live_bytes =
+        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
+    pages.push_back(std::make_pair(live_bytes, p));
+  }
+
+  int candidate_count = 0;
+  int total_live_bytes = 0;
+
+  bool reduce_memory =
+      reduce_memory_footprint_ ||
+      heap()->HasLowAllocationRate(
+          heap()->tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
+
+  if (FLAG_stress_compaction || FLAG_manual_evacuation_candidates_selection) {
+    for (size_t i = 0; i < pages.size(); i++) {
+      Page* p = pages[i].second;
+      if (((i % 2 == 0) && FLAG_stress_compaction) ||
+          p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+        candidate_count++;
+        total_live_bytes += pages[i].first;
+        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+        AddEvacuationCandidate(p);
       }
+    }
+  } else {
+    const int kTargetFragmentationPercent = 50;
+    const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
 
-      intptr_t free_bytes = 0;
-
-      if (!p->WasSwept()) {
-        free_bytes = (p->area_size() - p->LiveBytes());
-      } else {
-        PagedSpace::SizeStats sizes;
-        space->ObtainFreeListStatistics(p, &sizes);
-        free_bytes = sizes.Total();
-      }
+    const int kTargetFragmentationPercentForReduceMemory = 20;
+    const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
 
-      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+    int max_evacuated_bytes;
+    int target_fragmentation_percent;
 
-      if (free_pct >= kFreenessThreshold) {
-        estimated_release += free_bytes;
-        fragmentation = free_pct;
-      } else {
-        fragmentation = 0;
+    if (reduce_memory) {
+      target_fragmentation_percent =
+          kTargetFragmentationPercentForReduceMemory;
+      max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
+    } else {
+      target_fragmentation_percent = kTargetFragmentationPercent;
+      max_evacuated_bytes = kMaxEvacuatedBytes;
+    }
+    intptr_t free_bytes_threshold =
+        target_fragmentation_percent * (area_size / 100);
+
+    // Sort pages from the most free to the least free, then select
+    // the first n pages for evacuation such that:
+    // - the total size of evacuated objects does not exceed the specified
+    //   limit.
+    // - fragmentation of (n+1)-th page does not exceed the specified limit.
+    std::sort(pages.begin(), pages.end());
+    for (size_t i = 0; i < pages.size(); i++) {
+      int live_bytes = pages[i].first;
+      int free_bytes = area_size - live_bytes;
+      if (FLAG_always_compact ||
+          (free_bytes >= free_bytes_threshold &&
+           total_live_bytes + live_bytes <= max_evacuated_bytes)) {
+        candidate_count++;
+        total_live_bytes += live_bytes;
       }
-
       if (FLAG_trace_fragmentation_verbose) {
-        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
-               AllocationSpaceName(space->identity()),
-               static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / p->area_size(),
-               (fragmentation > 0) ? "[fragmented]" : "");
"[fragmented]" : ""); + PrintF( + "Page in %s: %d KB free [fragmented if this >= %d KB], " + "sum of live bytes in fragmented pages %d KB [max is %d KB]\n", + AllocationSpaceName(space->identity()), + static_cast(free_bytes / KB), + static_cast(free_bytes_threshold / KB), + static_cast(total_live_bytes / KB), + static_cast(max_evacuated_bytes / KB)); } - } else { - fragmentation = FreeListFragmentation(space, p); } - - if (fragmentation != 0) { - if (count < max_evacuation_candidates) { - candidates[count++] = Candidate(fragmentation, p); - } else { - if (least_index == -1) { - for (int i = 0; i < max_evacuation_candidates; i++) { - if (least_index == -1 || - candidates[i].fragmentation() < - candidates[least_index].fragmentation()) { - least_index = i; - } - } - } - if (candidates[least_index].fragmentation() < fragmentation) { - candidates[least_index] = Candidate(fragmentation, p); - least_index = -1; - } - } + // How many pages we will allocated for the evacuated objects + // in the worst case: ceil(total_live_bytes / area_size) + int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size; + DCHECK_LE(estimated_new_pages, candidate_count); + int estimated_released_pages = candidate_count - estimated_new_pages; + // Avoid (compact -> expand) cycles. + if (estimated_released_pages == 0 && !FLAG_always_compact) + candidate_count = 0; + for (int i = 0; i < candidate_count; i++) { + AddEvacuationCandidate(pages[i].second); } } - for (int i = 0; i < count; i++) { - AddEvacuationCandidate(candidates[i].page()); - } - - if (count > 0 && FLAG_trace_fragmentation) { - PrintF("Collected %d evacuation candidates for space %s\n", count, - AllocationSpaceName(space->identity())); + if (FLAG_trace_fragmentation) { + PrintF( + "Collected %d evacuation candidates [%d KB live] for space %s " + "[mode %s]\n", + candidate_count, static_cast(total_live_bytes / KB), + AllocationSpaceName(space->identity()), + (reduce_memory ? "reduce memory footprint" : "normal")); } } diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index 8069e51ce..821e701f6 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -1055,14 +1055,6 @@ int PagedSpace::CountTotalPages() { } -void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) { - sizes->huge_size_ = page->available_in_huge_free_list(); - sizes->small_size_ = page->available_in_small_free_list(); - sizes->medium_size_ = page->available_in_medium_free_list(); - sizes->large_size_ = page->available_in_large_free_list(); -} - - void PagedSpace::ResetFreeListStatistics() { PageIterator page_iterator(this); while (page_iterator.has_next()) { diff --git a/src/heap/spaces.h b/src/heap/spaces.h index c81cf7725..21548a049 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -678,11 +678,11 @@ class MemoryChunk { base::AtomicWord parallel_sweeping_; // PagedSpace free-list statistics. 
-  intptr_t available_in_small_free_list_;
-  intptr_t available_in_medium_free_list_;
-  intptr_t available_in_large_free_list_;
-  intptr_t available_in_huge_free_list_;
-  intptr_t non_available_small_blocks_;
+  int available_in_small_free_list_;
+  int available_in_medium_free_list_;
+  int available_in_large_free_list_;
+  int available_in_huge_free_list_;
+  int non_available_small_blocks_;
 
   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                  Address area_start, Address area_end,
@@ -776,16 +776,22 @@ class Page : public MemoryChunk {
 
   void ResetFreeListStatistics();
 
+  int LiveBytesFromFreeList() {
+    return area_size() - non_available_small_blocks_ -
+           available_in_small_free_list_ - available_in_medium_free_list_ -
+           available_in_large_free_list_ - available_in_huge_free_list_;
+  }
+
 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
   type name() { return name##_; }                 \
   void set_##name(type name) { name##_ = name; }  \
   void add_##name(type name) { name##_ += name; }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
 
 #undef FRAGMENTATION_STATS_ACCESSORS
 
@@ -1700,18 +1706,6 @@ class PagedSpace : public Space {
   // Approximate amount of physical memory committed for this space.
   size_t CommittedPhysicalMemory() override;
 
-  struct SizeStats {
-    intptr_t Total() {
-      return small_size_ + medium_size_ + large_size_ + huge_size_;
-    }
-
-    intptr_t small_size_;
-    intptr_t medium_size_;
-    intptr_t large_size_;
-    intptr_t huge_size_;
-  };
-
-  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
   void ResetFreeListStatistics();
 
   // Sets the capacity, the available space and the wasted space to zero.
--
2.34.1
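
The selection rule described in the commit message can be illustrated outside of V8 with a small standalone sketch. PageInfo, SelectEvacuationCandidates, kAreaSize and the sample numbers below are illustrative stand-ins, not V8 types or values; only the selection logic mirrors the loop added to CollectEvacuationCandidates: sort pages by live bytes ascending (most free first), then take the longest prefix that stays within the live-byte budget and whose pages are still fragmented enough.

// Standalone sketch of the candidate-selection rule described above.
// PageInfo, kAreaSize and the sample thresholds are illustrative stand-ins.
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

struct PageInfo {
  int live_bytes;  // Estimated live bytes on the page.
  int id;          // Stands in for Page* in the real code.
};

// Returns the ids of the pages chosen for evacuation.
std::vector<int> SelectEvacuationCandidates(const std::vector<PageInfo>& pages,
                                            int area_size,
                                            int target_fragmentation_percent,
                                            int max_evacuated_bytes) {
  // Pairs of (live_bytes_in_page, id), as in the patch.
  std::vector<std::pair<int, int> > sorted;
  for (size_t i = 0; i < pages.size(); i++)
    sorted.push_back(std::make_pair(pages[i].live_bytes, pages[i].id));

  // 1) Sort pages from the most free (fewest live bytes) to the least free.
  std::sort(sorted.begin(), sorted.end());

  const int free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);

  // 2) Take the longest prefix whose total live bytes stay within the
  //    evacuation budget and whose pages are still fragmented enough.
  std::vector<int> candidates;
  int total_live_bytes = 0;
  for (size_t i = 0; i < sorted.size(); i++) {
    int live_bytes = sorted[i].first;
    int free_bytes = area_size - live_bytes;
    if (free_bytes < free_bytes_threshold ||
        total_live_bytes + live_bytes > max_evacuated_bytes) {
      break;  // The (m+1)-th page fails one of the two conditions.
    }
    total_live_bytes += live_bytes;
    candidates.push_back(sorted[i].second);
  }
  return candidates;
}

int main() {
  const int kAreaSize = 500 * 1024;  // Illustrative page area, not V8's value.
  std::vector<PageInfo> pages = {{400 * 1024, 0}, {50 * 1024, 1},
                                 {200 * 1024, 2}, {120 * 1024, 3}};
  // 50% fragmentation threshold and a four-page live-byte budget, roughly
  // matching the non-reduce-memory mode of the patch.
  std::vector<int> chosen =
      SelectEvacuationCandidates(pages, kAreaSize, 50, 4 * kAreaSize);
  for (size_t i = 0; i < chosen.size(); i++)
    printf("candidate page %d\n", chosen[i]);
  return 0;
}

Because the pages are sorted by live bytes, the first page that fails either condition implies every later page fails it as well, so stopping at the first failure selects the same prefix as the per-page check in the patch; in the sample above the three mostly-empty pages are chosen and the nearly full page is left alone.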