Split up the marking step and the marking speed-up functionality.
author    hpayer@chromium.org <hpayer@chromium.org>
          Wed, 10 Sep 2014 07:23:38 +0000 (07:23 +0000)
committer hpayer@chromium.org <hpayer@chromium.org>
          Wed, 10 Sep 2014 07:23:38 +0000 (07:23 +0000)
BUG=
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/556503002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23822 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
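
In outline, this is an extract-method refactor: the block of acceleration heuristics that previously lived inline in IncrementalMarking::Step() moves into a new private helper, IncrementalMarking::SpeedUp(), which Step() now calls once per step. A minimal standalone mock of the resulting shape (hypothetical class and names, not the actual V8 types):

    #include <cstdint>
    #include <cstdio>

    // Hypothetical stand-in for IncrementalMarking, showing only the new call structure.
    class MarkerSketch {
     public:
      void Step(int64_t allocated_bytes) {
        // A real implementation would mark proportionally to allocated_bytes.
        (void)allocated_bytes;
        steps_count_++;
        // The acceleration decision is now delegated to a helper.
        SpeedUp();
      }

     private:
      void SpeedUp() {
        // ... the heuristics moved out of Step() by this patch would live here ...
        std::printf("step %d: acceleration heuristics evaluated\n", steps_count_);
      }

      int steps_count_ = 0;
    };

    int main() {
      MarkerSketch marker;
      for (int i = 0; i < 3; ++i) marker.Step(1024);
      return 0;
    }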

src/heap/incremental-marking.cc
src/heap/incremental-marking.h

index c922e83..8f92976 100644 (file)
@@ -821,6 +821,72 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
 }
 
 
+void IncrementalMarking::SpeedUp() {
+  bool speed_up = false;
+
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking after %d steps\n",
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+       old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (marking_speed_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking =
+      heap_->PromotedTotalSize() -
+      old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = marking_speed_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice the speed at which we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintPID("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      marking_speed_ += kMarkingSpeedAccelleration;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintPID("Marking speed increased to %d\n", marking_speed_);
+      }
+    }
+  }
+}
+
+
 void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
                               bool force_marking) {
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
@@ -877,69 +943,9 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
 
     steps_count_++;
 
-    bool speed_up = false;
-
-    if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-      if (FLAG_trace_gc) {
-        PrintPID("Speed up marking after %d steps\n",
-                 static_cast<int>(kMarkingSpeedAccellerationInterval));
-      }
-      speed_up = true;
-    }
-
-    bool space_left_is_very_small =
-        (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-    bool only_1_nth_of_space_that_was_available_still_left =
-        (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
-         old_generation_space_available_at_start_of_incremental_);
-
-    if (space_left_is_very_small ||
-        only_1_nth_of_space_that_was_available_still_left) {
-      if (FLAG_trace_gc)
-        PrintPID("Speed up marking because of low space left\n");
-      speed_up = true;
-    }
-
-    bool size_of_old_space_multiplied_by_n_during_marking =
-        (heap_->PromotedTotalSize() >
-         (marking_speed_ + 1) *
-             old_generation_space_used_at_start_of_incremental_);
-    if (size_of_old_space_multiplied_by_n_during_marking) {
-      speed_up = true;
-      if (FLAG_trace_gc) {
-        PrintPID("Speed up marking because of heap size increase\n");
-      }
-    }
-
-    int64_t promoted_during_marking =
-        heap_->PromotedTotalSize() -
-        old_generation_space_used_at_start_of_incremental_;
-    intptr_t delay = marking_speed_ * MB;
-    intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-    // We try to scan at at least twice the speed that we are allocating.
-    if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-      if (FLAG_trace_gc) {
-        PrintPID("Speed up marking because marker was not keeping up\n");
-      }
-      speed_up = true;
-    }
-
-    if (speed_up) {
-      if (state_ != MARKING) {
-        if (FLAG_trace_gc) {
-          PrintPID("Postponing speeding up marking until marking starts\n");
-        }
-      } else {
-        marking_speed_ += kMarkingSpeedAccelleration;
-        marking_speed_ = static_cast<int>(
-            Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
-        if (FLAG_trace_gc) {
-          PrintPID("Marking speed increased to %d\n", marking_speed_);
-        }
-      }
-    }
+    // Speed up marking if we are marking too slowly or if we are almost done
+    // with marking.
+    SpeedUp();
 
     double end = base::OS::TimeCurrentMillis();
     double duration = (end - start);
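
As a worked example of one of the triggers above: the only_1_nth_of_space_that_was_available_still_left check fires once less than 1/(marking_speed_ + 1) of the old-generation space that was available at the start of incremental marking is still free. A small standalone sketch of that arithmetic with hypothetical numbers (not taken from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical inputs for illustration only; in V8 they come from
      // SpaceLeftInOldSpace() and the *_at_start_of_incremental_ fields.
      const int64_t kMB = 1024 * 1024;
      int64_t available_at_start = 512 * kMB;  // free old-gen space when marking began
      int64_t space_left_now = 40 * kMB;       // free old-gen space at the current step
      int64_t marking_speed = 10;

      // Same shape as the check in SpeedUp(): accelerate once less than
      // 1/(marking_speed + 1) of the originally available space remains.
      bool only_nth_left = space_left_now * (marking_speed + 1) < available_at_start;
      std::printf("threshold ~= %lld MB, speed up: %s\n",
                  static_cast<long long>(available_at_start / (marking_speed + 1) / kMB),
                  only_nth_left ? "yes" : "no");
      return 0;
    }

With 40 MB left out of 512 MB initially available and a marking speed of 10, the threshold is roughly 46 MB, so the check passes and marking is accelerated.
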
index c054fbd..3ccb12a 100644 (file)
@@ -166,6 +166,8 @@ class IncrementalMarking {
  private:
   int64_t SpaceLeftInOldSpace();
 
+  void SpeedUp();
+
   void ResetStepCounters();
 
   void StartMarking(CompactionFlag flag);
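
Finally, on the acceleration step itself: when speed_up is set and the marker is already in the MARKING state, marking_speed_ is increased by kMarkingSpeedAccelleration and then grown by a further 30%, clamped to kMaxMarkingSpeed, so the speed rises roughly geometrically until it saturates. A standalone sketch of that growth curve, using assumed constants (an increment of 2 and a cap of 1000; the real values are defined in src/heap/incremental-marking.h):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Assumed constants for illustration; see kMarkingSpeedAccelleration and
      // kMaxMarkingSpeed in incremental-marking.h for the actual values.
      const int kAccelleration = 2;
      const int kMaxSpeed = 1000;

      int marking_speed = 1;  // assumed initial speed
      for (int round = 1; round <= 25; ++round) {
        // Mirrors the update in SpeedUp(): add a constant, grow by 30%,
        // and clamp to the maximum speed.
        marking_speed += kAccelleration;
        marking_speed = std::min(kMaxSpeed, static_cast<int>(marking_speed * 1.3));
        std::printf("acceleration %2d: marking_speed = %d\n", round, marking_speed);
      }
      return 0;
    }

Under these assumed constants the speed reaches the cap after roughly twenty accelerations, which matches the intent of the heuristics: once any of the triggers keeps firing, marking quickly becomes much more aggressive.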