From ae0a7ec93a9145652af74d26041fd96d2ca9a2c7 Mon Sep 17 00:00:00 2001
From: "ulan@chromium.org"
Date: Fri, 4 May 2012 09:36:46 +0000
Subject: [PATCH] Use correct size of promoted space for setting promotion and
 allocation limits.

Review URL: https://chromiumcodereview.appspot.com/10376008

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11513 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 src/heap.cc                   | 12 +-----------
 src/heap.h                    | 18 ++----------------
 src/incremental-marking-inl.h |  2 +-
 src/incremental-marking.cc    |  2 +-
 src/mark-compact.cc           |  2 +-
 src/spaces.cc                 |  2 --
 6 files changed, 6 insertions(+), 32 deletions(-)

diff --git a/src/heap.cc b/src/heap.cc
index e2e0e9e..ad28c1e 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -805,7 +805,7 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
 
   UpdateSurvivalRateTrend(start_new_space_size);
 
-  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
+  size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSizeOfObjects();
 
   if (high_survival_rate_during_scavenges &&
       IsStableOrIncreasingSurvivalTrend()) {
@@ -5810,16 +5810,6 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
 }
 
 
-intptr_t Heap::PromotedSpaceSize() {
-  return old_pointer_space_->Size()
-      + old_data_space_->Size()
-      + code_space_->Size()
-      + map_space_->Size()
-      + cell_space_->Size()
-      + lo_space_->Size();
-}
-
-
 intptr_t Heap::PromotedSpaceSizeOfObjects() {
   return old_pointer_space_->SizeOfObjects()
       + old_data_space_->SizeOfObjects()
diff --git a/src/heap.h b/src/heap.h
index b91416f..beb1bc5 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1342,7 +1342,7 @@ class Heap {
                                           PretenureFlag pretenure);
 
   inline intptr_t PromotedTotalSize() {
-    return PromotedSpaceSize() + PromotedExternalMemorySize();
+    return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
   }
 
   // True if we have reached the allocation limit in the old generation that
@@ -1363,19 +1363,6 @@ class Heap {
   static const intptr_t kMinimumAllocationLimit =
       8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
 
-  // When we sweep lazily we initially guess that there is no garbage on the
-  // heap and set the limits for the next GC accordingly. As we sweep we find
-  // out that some of the pages contained garbage and we have to adjust
-  // downwards the size of the heap. This means the limits that control the
-  // timing of the next GC also need to be adjusted downwards.
-  void LowerOldGenLimits(intptr_t adjustment) {
-    size_of_old_gen_at_last_old_space_gc_ -= adjustment;
-    old_gen_promotion_limit_ =
-        OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
-    old_gen_allocation_limit_ =
-        OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
-  }
-
   intptr_t OldGenPromotionLimit(intptr_t old_gen_size) {
     const int divisor = FLAG_stress_compaction ? 10 : 3;
     intptr_t limit =
@@ -1468,7 +1455,7 @@ class Heap {
     intptr_t adjusted_allocation_limit =
         old_gen_allocation_limit_ - new_space_.Capacity() / 5;
 
-    if (PromotedSpaceSize() >= adjusted_allocation_limit) return true;
+    if (PromotedSpaceSizeOfObjects() >= adjusted_allocation_limit) return true;
 
     return false;
   }
@@ -1506,7 +1493,6 @@ class Heap {
   GCTracer* tracer() { return tracer_; }
 
   // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSize();
   intptr_t PromotedSpaceSizeOfObjects();
 
   double total_regexp_code_generated() { return total_regexp_code_generated_; }
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
index 3e3d6c4..5ce003f 100644
--- a/src/incremental-marking-inl.h
+++ b/src/incremental-marking-inl.h
@@ -100,7 +100,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
     int64_t old_bytes_rescanned = bytes_rescanned_;
     bytes_rescanned_ = old_bytes_rescanned + obj_size;
     if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-      if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSize()) {
+      if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
         // If we have queued twice the heap size for rescanning then we are
         // going around in circles, scanning the same objects again and again
         // as the program mutates the heap faster than we can incrementally
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index 2413b67..5b58c9d 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -951,7 +951,7 @@ void IncrementalMarking::ResetStepCounters() {
 
 
 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSize();
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
 }
 
 } }  // namespace v8::internal
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 4216e16..5746d23 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -3829,7 +3829,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
-  intptr_t old_space_size = heap()->PromotedSpaceSize();
+  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
   intptr_t space_left =
       Min(heap()->OldGenPromotionLimit(old_space_size),
           heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
diff --git a/src/spaces.cc b/src/spaces.cc
index a5d61eb..a0c8f2c 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2295,8 +2295,6 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
       first_unswept_page_ = p;
     }
 
-    heap()->LowerOldGenLimits(freed_bytes);
-
     heap()->FreeQueuedChunks();
 
     return IsSweepingComplete();
-- 
2.7.4
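
P.S. A minimal standalone sketch of the accounting difference this patch corrects.
The types, divisor, and sizes below are illustrative stand-ins, not V8 internals:
Size() models committed page memory, while SizeOfObjects() models committed memory
minus bytes that lazy sweeping has not reclaimed yet, so a limit derived from it is
not inflated by memory that is already known to be garbage.

// sketch.cc -- hypothetical illustration, not part of the patch.
#include <algorithm>
#include <cstdint>
#include <cstdio>

// Stand-in for a paged space: committed memory vs. live objects.
struct FakeSpace {
  int64_t committed;     // bytes backed by committed pages
  int64_t unswept_free;  // garbage still waiting for lazy sweeping
  int64_t Size() const { return committed; }
  int64_t SizeOfObjects() const { return committed - unswept_free; }
};

// Loosely modeled on the divisor-based limit in the heap.h hunk above;
// the exact V8 formula is not reproduced here.
int64_t AllocationLimit(int64_t old_gen_size) {
  const int divisor = 3;
  const int64_t kMinimumLimit = 8 * (1 << 20);  // 8 MB
  return std::max(old_gen_size + old_gen_size / divisor, kMinimumLimit);
}

int main() {
  FakeSpace old_space{128 << 20, 48 << 20};  // 128 MB committed, 48 MB unswept
  // A limit based on committed size is pushed out by dead memory; a limit
  // based on live objects stays proportional to real usage, which is
  // presumably why the LowerOldGenLimits() adjustment during lazy sweeping
  // is no longer needed after this change.
  std::printf("limit from Size():          %lld\n",
              static_cast<long long>(AllocationLimit(old_space.Size())));
  std::printf("limit from SizeOfObjects(): %lld\n",
              static_cast<long long>(AllocationLimit(old_space.SizeOfObjects())));
  return 0;
}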