Make old generation allocation throughput stats independent from the new space allocation throughput stats.
author     ulan <ulan@chromium.org>
Tue, 9 Jun 2015 17:49:41 +0000 (10:49 -0700)
committer  Commit bot <commit-bot@chromium.org>
Tue, 9 Jun 2015 17:49:58 +0000 (17:49 +0000)
BUG=

Review URL: https://codereview.chromium.org/1167563005

Cr-Commit-Position: refs/heads/master@{#28880}

src/heap/gc-idle-time-handler.cc
src/heap/gc-idle-time-handler.h
src/heap/gc-tracer.cc
src/heap/gc-tracer.h
src/heap/heap.cc
src/heap/heap.h
src/heap/mark-compact.cc
test/cctest/test-heap.cc
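
For orientation before the hunks below: the tracer now records new-space and old-generation allocation samples independently, and the heap-level throughput is simply the sum of the two per-space figures. A minimal standalone sketch of that accounting (hypothetical code, not the V8 API):

    #include <cstdio>

    // One sampled window: how long it lasted and how many bytes each
    // generation allocated during it.
    struct AllocationSample {
      double duration_ms;
      double new_space_bytes;
      double old_generation_bytes;
    };

    int main() {
      AllocationSample sample = {100.0, 50000.0, 20000.0};
      double new_space = sample.new_space_bytes / sample.duration_ms;             // 500 bytes/ms
      double old_generation = sample.old_generation_bytes / sample.duration_ms;   // 200 bytes/ms
      // Keeping the counters separate loses nothing: the combined throughput
      // is just their sum.
      printf("new space: %.0f bytes/ms\n", new_space);
      printf("old generation: %.0f bytes/ms\n", old_generation);
      printf("combined: %.0f bytes/ms\n", new_space + old_generation);
      return 0;
    }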

diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index 496d7a0..168cc81 100644
@@ -51,6 +51,7 @@ void GCIdleTimeHandler::HeapState::Print() {
   PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
   PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
   PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
+  PrintF("has_low_allocation_rate=%d", has_low_allocation_rate);
   PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
          mark_compact_speed_in_bytes_per_ms);
   PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
@@ -60,8 +61,6 @@ void GCIdleTimeHandler::HeapState::Print() {
   PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
   PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
          new_space_allocation_throughput_in_bytes_per_ms);
-  PrintF("current_allocation_throughput=%" V8_PTR_PREFIX "d",
-         current_allocation_throughput_in_bytes_per_ms);
 }
 
 
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index c370174..c50d6d3 100644
@@ -181,6 +181,7 @@ class GCIdleTimeHandler {
     bool can_start_incremental_marking;
     bool sweeping_in_progress;
     bool sweeping_completed;
+    bool has_low_allocation_rate;
     size_t mark_compact_speed_in_bytes_per_ms;
     size_t incremental_marking_speed_in_bytes_per_ms;
     size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
@@ -188,7 +189,6 @@ class GCIdleTimeHandler {
     size_t used_new_space_size;
     size_t new_space_capacity;
     size_t new_space_allocation_throughput_in_bytes_per_ms;
-    size_t current_allocation_throughput_in_bytes_per_ms;
   };
 
   GCIdleTimeHandler()
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index e4d992c..6ca724e 100644
@@ -105,6 +105,7 @@ GCTracer::GCTracer(Heap* heap)
       allocation_duration_since_gc_(0.0),
       new_space_allocation_in_bytes_since_gc_(0),
       old_generation_allocation_in_bytes_since_gc_(0),
+      combined_mark_compact_speed_cache_(0.0),
       start_counter_(0) {
   current_ = Event(Event::START, NULL, NULL);
   current_.end_time = base::OS::TimeCurrentMillis();
@@ -226,12 +227,14 @@ void GCTracer::Stop(GarbageCollector collector) {
             .cumulative_pure_incremental_marking_duration;
     longest_incremental_marking_step_ = 0.0;
     incremental_mark_compactor_events_.push_front(current_);
+    combined_mark_compact_speed_cache_ = 0.0;
   } else {
     DCHECK(current_.incremental_marking_bytes == 0);
     DCHECK(current_.incremental_marking_duration == 0);
     DCHECK(current_.pure_incremental_marking_duration == 0);
     longest_incremental_marking_step_ = 0.0;
     mark_compactor_events_.push_front(current_);
+    combined_mark_compact_speed_cache_ = 0.0;
   }
 
   // TODO(ernstm): move the code below out of GCTracer.
@@ -292,10 +295,9 @@ void GCTracer::AddAllocation(double current_ms) {
   allocation_time_ms_ = current_ms;
   new_space_allocation_events_.push_front(AllocationEvent(
       allocation_duration_since_gc_, new_space_allocation_in_bytes_since_gc_));
-  allocation_events_.push_front(
+  old_generation_allocation_events_.push_front(
       AllocationEvent(allocation_duration_since_gc_,
-                      new_space_allocation_in_bytes_since_gc_ +
-                          old_generation_allocation_in_bytes_since_gc_));
+                      old_generation_allocation_in_bytes_since_gc_));
   allocation_duration_since_gc_ = 0;
   new_space_allocation_in_bytes_since_gc_ = 0;
   old_generation_allocation_in_bytes_since_gc_ = 0;
@@ -560,8 +562,8 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
@@ -576,8 +578,8 @@ intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
@@ -592,8 +594,8 @@ intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<intptr_t>(bytes / durations);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
@@ -609,47 +611,77 @@ intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
   }
 
   if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
 
-  return static_cast<intptr_t>(bytes / durations);
+
+double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
+  if (combined_mark_compact_speed_cache_ > 0)
+    return combined_mark_compact_speed_cache_;
+  const double kEpsilon = 1;
+  double speed1 =
+      static_cast<double>(IncrementalMarkingSpeedInBytesPerMillisecond());
+  double speed2 = static_cast<double>(
+      FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
+  if (speed1 + speed2 < kEpsilon) {
+    // No data for the incremental marking speed.
+    // Return the non-incremental mark-compact speed.
+    combined_mark_compact_speed_cache_ =
+        static_cast<double>(MarkCompactSpeedInBytesPerMillisecond());
+  } else {
+    // Combine the speed of incremental step and the speed of the final step.
+    // 1 / (1 / speed1 + 1 / speed2) = speed1 * speed2 / (speed1 + speed2).
+    combined_mark_compact_speed_cache_ = speed1 * speed2 / (speed1 + speed2);
+  }
+  return combined_mark_compact_speed_cache_;
 }
 
 
-size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
+size_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
   size_t bytes = new_space_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
   AllocationEventBuffer::const_iterator iter =
       new_space_allocation_events_.begin();
   const size_t max_bytes = static_cast<size_t>(-1);
   while (iter != new_space_allocation_events_.end() &&
-         bytes < max_bytes - bytes) {
+         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
     bytes += iter->allocation_in_bytes_;
     durations += iter->duration_;
     ++iter;
   }
 
   if (durations == 0.0) return 0;
-
-  return static_cast<size_t>(bytes / durations + 0.5);
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }
 
 
-size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+size_t GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
-  size_t bytes = new_space_allocation_in_bytes_since_gc_ +
-                 old_generation_allocation_in_bytes_since_gc_;
+  size_t bytes = old_generation_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
-  AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
+  AllocationEventBuffer::const_iterator iter =
+      old_generation_allocation_events_.begin();
   const size_t max_bytes = static_cast<size_t>(-1);
-  while (iter != allocation_events_.end() && bytes < max_bytes - bytes &&
-         durations < time_ms) {
+  while (iter != old_generation_allocation_events_.end() &&
+         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
     bytes += iter->allocation_in_bytes_;
     durations += iter->duration_;
     ++iter;
   }
 
   if (durations == 0.0) return 0;
+  // Make sure the result is at least 1.
+  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+}
 
-  return static_cast<size_t>(bytes / durations + 0.5);
+
+size_t GCTracer::AllocationThroughputInBytesPerMillisecond(
+    double time_ms) const {
+  return NewSpaceAllocationThroughputInBytesPerMillisecond(time_ms) +
+         OldGenerationAllocationThroughputInBytesPerMillisecond(time_ms);
 }
 
 
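An editorial worked example (not part of this CL) of the harmonic combination computed by CombinedMarkCompactSpeedInBytesPerMillisecond() above; the helper name CombinedSpeed below is hypothetical, and the real function additionally caches its result and falls back to the plain mark-compact speed when no incremental data has been recorded:

    #include <cstdio>

    // Marking a byte costs roughly 1/speed1 ms of incremental steps plus
    // 1/speed2 ms in the final step, so the effective speed is
    // 1 / (1/speed1 + 1/speed2) = speed1 * speed2 / (speed1 + speed2).
    static double CombinedSpeed(double speed1, double speed2) {
      return speed1 * speed2 / (speed1 + speed2);
    }

    int main() {
      // 200 bytes/ms incremental marking + 600 bytes/ms final step
      // => 200 * 600 / 800 = 150 bytes/ms overall.
      printf("%.1f bytes/ms\n", CombinedSpeed(200.0, 600.0));
      return 0;
    }
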
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index 5ff846b..33a2ab8 100644
@@ -382,9 +382,20 @@ class GCTracer {
   // Returns 0 if no events have been recorded.
   intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
 
+  // Compute the overall mark compact speed including incremental steps
+  // and the final mark-compact step.
+  double CombinedMarkCompactSpeedInBytesPerMillisecond();
+
   // Allocation throughput in the new space in bytes/millisecond.
   // Returns 0 if no allocation events have been recorded.
-  size_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+  size_t NewSpaceAllocationThroughputInBytesPerMillisecond(
+      double time_ms = 0) const;
+
+  // Allocation throughput in the old generation in bytes/millisecond in the
+  // last time_ms milliseconds.
+  // Returns 0 if no allocation events have been recorded.
+  size_t OldGenerationAllocationThroughputInBytesPerMillisecond(
+      double time_ms = 0) const;
 
   // Allocation throughput in heap in bytes/millisecond in the last time_ms
   // milliseconds.
@@ -466,7 +477,7 @@ class GCTracer {
 
   // RingBuffer for allocation events.
   AllocationEventBuffer new_space_allocation_events_;
-  AllocationEventBuffer allocation_events_;
+  AllocationEventBuffer old_generation_allocation_events_;
 
   // RingBuffer for context disposal events.
   ContextDisposalEventBuffer context_disposal_events_;
@@ -514,6 +525,8 @@ class GCTracer {
   size_t new_space_allocation_in_bytes_since_gc_;
   size_t old_generation_allocation_in_bytes_since_gc_;
 
+  double combined_mark_compact_speed_cache_;
+
   // Counts how many tracers were started without stopping.
   int start_counter_;
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 37eb004..f2783cf 100644
@@ -470,7 +470,6 @@ void Heap::GarbageCollectionPrologue() {
   }
   CheckNewSpaceExpansionCriteria();
   UpdateNewSpaceAllocationCounter();
-  UpdateOldGenerationAllocationCounter();
 }
 
 
@@ -738,8 +737,7 @@ void Heap::GarbageCollectionEpilogue() {
   new_space_top_after_last_gc_ = new_space()->top();
   last_gc_time_ = MonotonicallyIncreasingTimeInMs();
 
-  ReduceNewSpaceSize(
-      tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
+  ReduceNewSpaceSize();
 }
 
 
@@ -1214,18 +1212,19 @@ bool Heap::PerformGarbageCollection(
   }
 
   if (collector == MARK_COMPACTOR) {
+    UpdateOldGenerationAllocationCounter();
     // Perform mark-sweep with optional compaction.
     MarkCompact();
     sweep_generation_++;
     old_gen_exhausted_ = false;
     old_generation_size_configured_ = true;
+    // This should be updated before PostGarbageCollectionProcessing, which can
+    // cause another GC.
+    old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
   } else {
     Scavenge();
   }
 
-  // This should be updated before PostGarbageCollectionProcessing, which can
-  // cause another GC.
-  old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
 
   UpdateSurvivalStatistics(start_new_space_size);
   ConfigureInitialOldGenerationSize();
@@ -4509,15 +4508,38 @@ void Heap::MakeHeapIterable() {
 }
 
 
-bool Heap::HasLowAllocationRate(size_t allocation_rate) {
-  static const size_t kLowAllocationRate = 1000;
-  if (allocation_rate == 0) return false;
-  return allocation_rate < kLowAllocationRate;
+bool Heap::HasLowYoungGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.995;
+  double mutator_speed = static_cast<double>(
+      tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+  double gc_speed =
+      static_cast<double>(tracer()->ScavengeSpeedInBytesPerMillisecond());
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
 }
 
 
-void Heap::ReduceNewSpaceSize(size_t allocation_rate) {
-  if (!FLAG_predictable && HasLowAllocationRate(allocation_rate)) {
+bool Heap::HasLowOldGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.995;
+  double mutator_speed = static_cast<double>(
+      tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
+  double gc_speed = static_cast<double>(
+      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
+}
+
+
+bool Heap::HasLowAllocationRate() {
+  return HasLowYoungGenerationAllocationRate() &&
+         HasLowOldGenerationAllocationRate();
+}
+
+
+void Heap::ReduceNewSpaceSize() {
+  if (!FLAG_predictable && HasLowAllocationRate()) {
     new_space_.Shrink();
     UncommitFromSpace();
   }
@@ -4573,11 +4595,9 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
   heap_state.new_space_capacity = new_space_.Capacity();
   heap_state.new_space_allocation_throughput_in_bytes_per_ms =
       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond();
-  heap_state.current_allocation_throughput_in_bytes_per_ms =
-      tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
+  heap_state.has_low_allocation_rate = HasLowAllocationRate();
   intptr_t limit = old_generation_allocation_limit_;
-  if (HasLowAllocationRate(
-          heap_state.current_allocation_throughput_in_bytes_per_ms)) {
+  if (heap_state.has_low_allocation_rate) {
     limit = idle_old_generation_allocation_limit_;
   }
   heap_state.can_start_incremental_marking =
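
An editorial sketch (not part of this CL) of the low-allocation-rate heuristic that HasLowYoungGenerationAllocationRate() and HasLowOldGenerationAllocationRate() apply in the heap.cc hunks above; the free function below is hypothetical and only mirrors the utilization check:

    #include <cstdio>

    // The allocation rate counts as low when the GC is so much faster than
    // the mutator's allocation that keeping up would take less than 0.5% of
    // the time, i.e. gc_speed / (mutator_speed + gc_speed) > 0.995.
    static bool HasLowAllocationRate(double mutator_speed, double gc_speed) {
      const double kHighMutatorUtilization = 0.995;
      if (mutator_speed == 0 || gc_speed == 0) return false;  // No samples yet.
      double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
      return mutator_utilization > kHighMutatorUtilization;
    }

    int main() {
      // Allocating 10 bytes/ms against a collector that processes
      // 10000 bytes/ms gives a utilization of ~0.999, so the rate is low.
      printf("%d\n", HasLowAllocationRate(10.0, 10000.0));
      return 0;
    }
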
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 2782ed8..410e0bf 100644
@@ -2142,9 +2142,11 @@ class Heap {
 
   void SelectScavengingVisitorsTable();
 
-  bool HasLowAllocationRate(size_t allocaion_rate);
+  bool HasLowYoungGenerationAllocationRate();
+  bool HasLowOldGenerationAllocationRate();
+  bool HasLowAllocationRate();
 
-  void ReduceNewSpaceSize(size_t allocaion_rate);
+  void ReduceNewSpaceSize();
 
   bool TryFinalizeIdleIncrementalMarking(
       double idle_time_in_ms, size_t size_of_objects,
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 5a4a1a7..571ebac 100644
@@ -677,9 +677,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   int total_live_bytes = 0;
 
   bool reduce_memory =
-      reduce_memory_footprint_ ||
-      heap()->HasLowAllocationRate(
-          heap()->tracer()->CurrentAllocationThroughputInBytesPerMillisecond());
+      reduce_memory_footprint_ || heap()->HasLowAllocationRate();
   if (FLAG_manual_evacuation_candidates_selection) {
     for (size_t i = 0; i < pages.size(); i++) {
       Page* p = pages[i].second;
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index dcbe306..02cd560 100644
@@ -5808,6 +5808,7 @@ TEST(OldSpaceAllocationCounter) {
   Heap* heap = isolate->heap();
   size_t counter1 = heap->OldGenerationAllocationCounter();
   heap->CollectGarbage(NEW_SPACE);
+  heap->CollectGarbage(NEW_SPACE);
   const size_t kSize = 1024;
   AllocateInSpace(isolate, kSize, OLD_SPACE);
   size_t counter2 = heap->OldGenerationAllocationCounter();
@@ -5868,12 +5869,13 @@ TEST(NewSpaceAllocationThroughput2) {
   int time2 = 200;
   size_t counter2 = 2000;
   tracer->SampleAllocation(time2, counter2, 0);
-  size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+  size_t throughput =
+      tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
   CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
   int time3 = 1000;
   size_t counter3 = 30000;
   tracer->SampleAllocation(time3, counter3, 0);
-  throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+  throughput = tracer->NewSpaceAllocationThroughputInBytesPerMillisecond(100);
   CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
 }
 
@@ -5932,12 +5934,14 @@ TEST(OldGenerationAllocationThroughput) {
   int time2 = 200;
   size_t counter2 = 2000;
   tracer->SampleAllocation(time2, 0, counter2);
-  size_t throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+  size_t throughput =
+      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
   CHECK_EQ((counter2 - counter1) / (time2 - time1), throughput);
   int time3 = 1000;
   size_t counter3 = 30000;
   tracer->SampleAllocation(time3, 0, counter3);
-  throughput = tracer->AllocationThroughputInBytesPerMillisecond(100);
+  throughput =
+      tracer->OldGenerationAllocationThroughputInBytesPerMillisecond(100);
   CHECK_EQ((counter3 - counter1) / (time3 - time1), throughput);
 }