1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
5 #include "src/heap/gc-idle-time-handler.h"
6 #include "src/heap/gc-tracer.h"
12 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
13 const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
14 const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
15 const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
16 const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
17 const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
18 const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
21 void GCIdleTimeAction::Print() {
29 case DO_INCREMENTAL_MARKING:
30 PrintF("incremental marking with step %" V8_PTR_PREFIX "d / ms",
32 if (additional_work) {
33 PrintF("; finalized marking");
42 case DO_FINALIZE_SWEEPING:
43 PrintF("finalize sweeping");
49 void GCIdleTimeHandler::HeapState::Print() {
50 PrintF("contexts_disposed=%d ", contexts_disposed);
51 PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
52 PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
53 PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
54 PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
55 PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
56 PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
57 mark_compact_speed_in_bytes_per_ms);
58 PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
59 incremental_marking_speed_in_bytes_per_ms);
60 PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
61 PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
62 PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
63 PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d",
64 new_space_allocation_throughput_in_bytes_per_ms);
68 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
69 size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
70 DCHECK(idle_time_in_ms > 0);
72 if (marking_speed_in_bytes_per_ms == 0) {
73 marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
76 size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
77 if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
78 // In the case of an overflow we return maximum marking step size.
79 return kMaximumMarkingStepSize;
82 if (marking_step_size > kMaximumMarkingStepSize)
83 return kMaximumMarkingStepSize;
85 return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
89 size_t GCIdleTimeHandler::EstimateMarkCompactTime(
90 size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
91 // TODO(hpayer): Be more precise about the type of mark-compact event. It
92 // makes a huge difference if compaction is happening.
93 if (mark_compact_speed_in_bytes_per_ms == 0) {
94 mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
96 size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
97 return Min(result, kMaxMarkCompactTimeInMs);
101 size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
102 size_t size_of_objects,
103 size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
104 if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
105 final_incremental_mark_compact_speed_in_bytes_per_ms =
106 kInitialConservativeFinalIncrementalMarkCompactSpeed;
109 size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
110 return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
114 bool GCIdleTimeHandler::ShouldDoScavenge(
115 size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
116 size_t scavenge_speed_in_bytes_per_ms,
117 size_t new_space_allocation_throughput_in_bytes_per_ms) {
118 size_t new_space_allocation_limit =
119 kMaxFrameRenderingIdleTime * scavenge_speed_in_bytes_per_ms;
121 // If the limit is larger than the new space size, then scavenging used to be
122 // really fast. We can take advantage of the whole new space.
123 if (new_space_allocation_limit > new_space_size) {
124 new_space_allocation_limit = new_space_size;
127 // We do not know the allocation throughput before the first Scavenge.
128 // TODO(hpayer): Estimate allocation throughput before the first Scavenge.
129 if (new_space_allocation_throughput_in_bytes_per_ms == 0) {
130 new_space_allocation_limit =
131 static_cast<size_t>(new_space_size * kConservativeTimeRatio);
133 // We have to trigger scavenge before we reach the end of new space.
134 new_space_allocation_limit -=
135 new_space_allocation_throughput_in_bytes_per_ms *
136 kMaxFrameRenderingIdleTime;
139 if (scavenge_speed_in_bytes_per_ms == 0) {
140 scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
143 if (new_space_allocation_limit <= used_new_space_size) {
144 if (used_new_space_size / scavenge_speed_in_bytes_per_ms <=
153 bool GCIdleTimeHandler::ShouldDoMarkCompact(
154 size_t idle_time_in_ms, size_t size_of_objects,
155 size_t mark_compact_speed_in_bytes_per_ms) {
156 return idle_time_in_ms >=
157 EstimateMarkCompactTime(size_of_objects,
158 mark_compact_speed_in_bytes_per_ms);
162 bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
163 int contexts_disposed, double contexts_disposal_rate) {
164 return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
165 contexts_disposal_rate < kHighContextDisposalRate;
169 bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
170 size_t idle_time_in_ms, size_t size_of_objects,
171 size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
172 return idle_time_in_ms >=
173 EstimateFinalIncrementalMarkCompactTime(
175 final_incremental_mark_compact_speed_in_bytes_per_ms);
179 // The following logic is implemented by the controller:
// (1) If we don't have any idle time, do nothing, unless a context was
// disposed, incremental marking is stopped, and the heap is small. Then do
// a full GC.
// (2) If the new space is almost full and we can afford a Scavenge or if the
184 // next Scavenge will very likely take long, then a Scavenge is performed.
185 // (3) If there is currently no MarkCompact idle round going on, we start a
186 // new idle round if enough garbage was created. Otherwise we do not perform
187 // garbage collection to keep system utilization low.
188 // (4) If incremental marking is done, we perform a full garbage collection
189 // if we are allowed to still do full garbage collections during this idle
190 // round or if we are not allowed to start incremental marking. Otherwise we
191 // do not perform garbage collection to keep system utilization low.
192 // (5) If sweeping is in progress and we received a large enough idle time
193 // request, we finalize sweeping here.
194 // (6) If incremental marking is in progress, we perform a marking step. Note,
195 // that this currently may trigger a full garbage collection.
196 GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
197 HeapState heap_state) {
198 if (static_cast<int>(idle_time_in_ms) <= 0) {
199 if (heap_state.contexts_disposed > 0) {
202 if (heap_state.incremental_marking_stopped) {
203 if (ShouldDoContextDisposalMarkCompact(
204 heap_state.contexts_disposed,
205 heap_state.contexts_disposal_rate)) {
206 return GCIdleTimeAction::FullGC();
209 return GCIdleTimeAction::Nothing();
212 if (ShouldDoScavenge(
213 static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
214 heap_state.used_new_space_size,
215 heap_state.scavenge_speed_in_bytes_per_ms,
216 heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
217 return GCIdleTimeAction::Scavenge();
220 if (IsMarkCompactIdleRoundFinished()) {
221 if (EnoughGarbageSinceLastIdleRound()) {
224 return GCIdleTimeAction::Done();
228 if (heap_state.incremental_marking_stopped) {
229 if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
230 heap_state.size_of_objects,
231 heap_state.mark_compact_speed_in_bytes_per_ms)) {
232 // If there are no more than two GCs left in this idle round and we are
233 // allowed to do a full GC, then make those GCs full in order to compact
235 // TODO(ulan): Once we enable code compaction for incremental marking, we
236 // can get rid of this special case and always start incremental marking.
237 int remaining_mark_sweeps =
238 kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
239 if (static_cast<size_t>(idle_time_in_ms) > kMaxFrameRenderingIdleTime &&
240 (remaining_mark_sweeps <= 2 ||
241 !heap_state.can_start_incremental_marking)) {
242 return GCIdleTimeAction::FullGC();
245 if (!heap_state.can_start_incremental_marking) {
246 return GCIdleTimeAction::Nothing();
249 // TODO(hpayer): Estimate finalize sweeping time.
250 if (heap_state.sweeping_in_progress &&
251 static_cast<size_t>(idle_time_in_ms) >= kMinTimeForFinalizeSweeping) {
252 return GCIdleTimeAction::FinalizeSweeping();
255 if (heap_state.incremental_marking_stopped &&
256 !heap_state.can_start_incremental_marking) {
257 return GCIdleTimeAction::Nothing();
259 size_t step_size = EstimateMarkingStepSize(
260 static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
261 heap_state.incremental_marking_speed_in_bytes_per_ms);
262 return GCIdleTimeAction::IncrementalMarking(step_size);