// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
5 #include "src/heap/gc-idle-time-handler.h"
6 #include "src/heap/gc-tracer.h"
12 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
13 const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
14 const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
15 const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
16 const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
19 void GCIdleTimeAction::Print() {
27 case DO_INCREMENTAL_MARKING:
28 PrintF("incremental marking with step %" V8_PTR_PREFIX "d", parameter);
36 case DO_FINALIZE_SWEEPING:
37 PrintF("finalize sweeping");
43 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
44 size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
45 DCHECK(idle_time_in_ms > 0);
47 if (marking_speed_in_bytes_per_ms == 0) {
48 marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
51 size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
52 if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
53 // In the case of an overflow we return maximum marking step size.
54 return kMaximumMarkingStepSize;
57 if (marking_step_size > kMaximumMarkingStepSize)
58 return kMaximumMarkingStepSize;
60 return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
64 size_t GCIdleTimeHandler::EstimateMarkCompactTime(
65 size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
66 // TODO(hpayer): Be more precise about the type of mark-compact event. It
67 // makes a huge difference if it is incremental or non-incremental and if
68 // compaction is happening.
69 if (mark_compact_speed_in_bytes_per_ms == 0) {
70 mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
72 size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
73 return Min(result, kMaxMarkCompactTimeInMs);
77 bool GCIdleTimeHandler::ShouldDoScavenge(
78 size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
79 size_t scavenge_speed_in_bytes_per_ms,
80 size_t new_space_allocation_throughput_in_bytes_per_ms) {
81 size_t new_space_allocation_limit =
82 kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
84 // If the limit is larger than the new space size, then scavenging used to be
85 // really fast. We can take advantage of the whole new space.
86 if (new_space_allocation_limit > new_space_size) {
87 new_space_allocation_limit = new_space_size;
90 // We do not know the allocation throughput before the first Scavenge.
91 // TODO(hpayer): Estimate allocation throughput before the first Scavenge.
92 if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
93 new_space_allocation_limit =
94 static_cast<size_t>(new_space_size * kConservativeTimeRatio);
96 // We have to trigger scavenge before we reach the end of new space.
97 new_space_allocation_limit -=
98 new_space_allocation_throughput_in_bytes_per_ms *
99 kMaxFrameRenderingIdleTime;
102 if (scavenge_speed_in_bytes_per_ms == 0) {
103 scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
106 if (new_space_allocation_limit <= used_new_space_size) {
107 if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
116 bool GCIdleTimeHandler::ShouldDoMarkCompact(
117 size_t idle_time_in_ms, size_t size_of_objects,
118 size_t mark_compact_speed_in_bytes_per_ms) {
119 return idle_time_in_ms >=
120 EstimateMarkCompactTime(size_of_objects,
121 mark_compact_speed_in_bytes_per_ms);
// The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// a full GC.
// (2) If the new space is almost full and we can afford a Scavenge or if the
// next Scavenge will very likely take long, then a Scavenge is performed.
// (3) If there is currently no MarkCompact idle round going on, we start a
// new idle round if enough garbage was created or we received a context
// disposal event. Otherwise we do not perform garbage collection to keep
// system utilization low.
// (4) If incremental marking is done, we perform a full garbage collection
// if context was disposed or if we are allowed to still do full garbage
// collections during this idle round or if we are not allowed to start
// incremental marking. Otherwise we do not perform garbage collection to
// keep system utilization low.
// (5) If sweeping is in progress and we received a large enough idle time
// request, we finalize sweeping here.
// (6) If incremental marking is in progress, we perform a marking step. Note,
// that this currently may trigger a full garbage collection.
144 GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
145 HeapState heap_state) {
146 if (idle_time_in_ms == 0) {
147 if (heap_state.incremental_marking_stopped) {
148 if (heap_state.size_of_objects < kSmallHeapSize &&
149 heap_state.contexts_disposed > 0) {
150 return GCIdleTimeAction::FullGC();
153 return GCIdleTimeAction::Nothing();
156 if (ShouldDoScavenge(
157 idle_time_in_ms, heap_state.new_space_capacity,
158 heap_state.used_new_space_size,
159 heap_state.scavenge_speed_in_bytes_per_ms,
160 heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
161 return GCIdleTimeAction::Scavenge();
164 if (IsMarkCompactIdleRoundFinished()) {
165 if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
168 return GCIdleTimeAction::Done();
172 if (heap_state.incremental_marking_stopped) {
173 // TODO(jochen): Remove context disposal dependant logic.
174 if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
175 heap_state.mark_compact_speed_in_bytes_per_ms) ||
176 (heap_state.size_of_objects < kSmallHeapSize &&
177 heap_state.contexts_disposed > 0)) {
178 // If there are no more than two GCs left in this idle round and we are
179 // allowed to do a full GC, then make those GCs full in order to compact
181 // TODO(ulan): Once we enable code compaction for incremental marking, we
182 // can get rid of this special case and always start incremental marking.
183 int remaining_mark_sweeps =
184 kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
185 if (heap_state.contexts_disposed > 0 ||
186 (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
187 (remaining_mark_sweeps <= 2 ||
188 !heap_state.can_start_incremental_marking))) {
189 return GCIdleTimeAction::FullGC();
192 if (!heap_state.can_start_incremental_marking) {
193 return GCIdleTimeAction::Nothing();
196 // TODO(hpayer): Estimate finalize sweeping time.
197 if (heap_state.sweeping_in_progress &&
198 idle_time_in_ms >= kMinTimeForFinalizeSweeping) {
199 return GCIdleTimeAction::FinalizeSweeping();
202 if (heap_state.incremental_marking_stopped &&
203 !heap_state.can_start_incremental_marking) {
204 return GCIdleTimeAction::Nothing();
206 size_t step_size = EstimateMarkingStepSize(
207 idle_time_in_ms, heap_state.incremental_marking_speed_in_bytes_per_ms);
208 return GCIdleTimeAction::IncrementalMarking(step_size);